/*-
 * Bus independent FreeBSD shim for the aic79xx based Adaptec SCSI controllers
 *
 * Copyright (c) 1994-2002, 2004 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#35 $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/aic7xxx/aic79xx_osm.h>
#include <dev/aic7xxx/aic79xx_inline.h>

#include <sys/kthread.h>

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifndef AHD_TMODE_ENABLE
#define AHD_TMODE_ENABLE 0
#endif

#include <dev/aic7xxx/aic_osm_lib.c>

#define ccb_scb_ptr spriv_ptr0

#if 0
static void	ahd_dump_targcmd(struct target_cmd *cmd);
#endif
static int	ahd_modevent(module_t mod, int type, void *data);
static void	ahd_action(struct cam_sim *sim, union ccb *ccb);
static void	ahd_set_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_get_tran_settings(struct ahd_softc *ahd,
				      int our_id, char channel,
				      struct ccb_trans_settings *cts);
static void	ahd_async(void *callback_arg, uint32_t code,
			  struct cam_path *path, void *arg);
static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	ahd_poll(struct cam_sim *sim);
static void	ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
			       struct ccb_scsiio *csio, struct scb *scb);
static void	ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim,
			      union ccb *ccb);
static int	ahd_create_path(struct ahd_softc *ahd,
				char channel, u_int target, u_int lun,
				struct cam_path **path);

static const char *ahd_sysctl_node_elements[] = {
	"root",
	"summary",
	"debug"
};

static const char *ahd_sysctl_node_descriptions[] = {
	"root error collection for aic79xx controllers",
	"summary collection for aic79xx controllers",
	"debug collection for aic79xx controllers"
};

static const char *ahd_sysctl_errors_elements[] = {
	"Cerrors",
	"Uerrors",
	"Ferrors"
};

static const char *ahd_sysctl_errors_descriptions[] = {
	"Correctable errors",
	"Uncorrectable errors",
	"Fatal errors"
};

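/*
 * Sysctl handler for the writable per-controller debug error counters.
 * One instance is attached per error class (arg2 selects the index into
 * summerr[]), so a counter can be seeded from userland, e.g. with
 * "sysctl hw.ahd0.debug.Cerrors=0" (the node name follows
 * device_get_nameunit(), so the unit number may differ).
 */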
static int
ahd_set_debugcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv < 0 || tmpv >= AHD_ERRORS_NUMBER)
		return (EINVAL);
	sc->summerr[arg2] = tmpv;
	return (0);
}

static int
ahd_clear_allcounters(SYSCTL_HANDLER_ARGS)
{
	struct ahd_softc *sc;
	int error, tmpv;

	tmpv = 0;
	sc = arg1;
	error = sysctl_handle_int(oidp, &tmpv, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (tmpv != 0)
		bzero(sc->summerr, sizeof(sc->summerr));
	return (0);
}

static int
ahd_create_path(struct ahd_softc *ahd, char channel, u_int target,
		u_int lun, struct cam_path **path)
{
	path_id_t path_id;

	path_id = cam_sim_path(ahd->platform_data->sim);
	return (xpt_create_path(path, /*periph*/NULL,
				path_id, target, lun));
}

void
ahd_sysctl(struct ahd_softc *ahd)
{
	u_int i;

	for (i = 0; i < AHD_SYSCTL_NUMBER; i++)
		sysctl_ctx_init(&ahd->sysctl_ctx[i]);

	ahd->sysctl_tree[AHD_SYSCTL_ROOT] =
	    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
			    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
			    device_get_nameunit(ahd->dev_softc), CTLFLAG_RD, 0,
			    ahd_sysctl_node_descriptions[AHD_SYSCTL_ROOT]);
	SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_ROOT],
			SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
			OID_AUTO, "clear", CTLTYPE_UINT | CTLFLAG_RW, ahd,
			0, ahd_clear_allcounters, "IU",
			"Clear all counters");

	for (i = AHD_SYSCTL_SUMMARY; i < AHD_SYSCTL_NUMBER; i++)
		ahd->sysctl_tree[i] =
		    SYSCTL_ADD_NODE(&ahd->sysctl_ctx[i],
				    SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_ROOT]),
				    OID_AUTO, ahd_sysctl_node_elements[i],
				    CTLFLAG_RD, 0,
				    ahd_sysctl_node_descriptions[i]);

	for (i = AHD_ERRORS_CORRECTABLE; i < AHD_ERRORS_NUMBER; i++) {
		SYSCTL_ADD_UINT(&ahd->sysctl_ctx[AHD_SYSCTL_SUMMARY],
				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_SUMMARY]),
				OID_AUTO, ahd_sysctl_errors_elements[i],
				CTLFLAG_RD, &ahd->summerr[i], i,
				ahd_sysctl_errors_descriptions[i]);
		SYSCTL_ADD_PROC(&ahd->sysctl_ctx[AHD_SYSCTL_DEBUG],
				SYSCTL_CHILDREN(ahd->sysctl_tree[AHD_SYSCTL_DEBUG]),
				OID_AUTO, ahd_sysctl_errors_elements[i],
				CTLFLAG_RW | CTLTYPE_UINT, ahd, i,
				ahd_set_debugcounters, "IU",
				ahd_sysctl_errors_descriptions[i]);
	}
}

int
ahd_map_int(struct ahd_softc *ahd)
{
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(ahd->dev_softc, ahd->platform_data->irq,
			       INTR_TYPE_CAM|INTR_MPSAFE, NULL,
			       ahd_platform_intr, ahd, &ahd->platform_data->ih);
	if (error != 0)
		device_printf(ahd->dev_softc, "bus_setup_intr() failed: %d\n",
			      error);
	return (error);
}

/*
 * Attach all the sub-devices we can find
 */
int
ahd_attach(struct ahd_softc *ahd)
{
	char ahd_info[256];
	struct ccb_setasync csa;
	struct cam_devq *devq;
	struct cam_sim *sim;
	struct cam_path *path;
	int count;

	count = 0;
	devq = NULL;
	sim = NULL;

	/*
	 * Create a thread to perform all recovery.
	 */
	if (ahd_spawn_recovery_thread(ahd) != 0)
		goto fail;

	ahd_controller_info(ahd, ahd_info);
	printf("%s\n", ahd_info);
	ahd_lock(ahd);

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(AHD_MAX_QUEUE);
	if (devq == NULL)
		goto fail;

	/*
	 * Construct our SIM entry
	 */
	sim = cam_sim_alloc(ahd_action, ahd_poll, "ahd", ahd,
			    device_get_unit(ahd->dev_softc),
			    &ahd->platform_data->mtx, 1, /*XXX*/256, devq);
	if (sim == NULL) {
		cam_simq_free(devq);
		goto fail;
	}

	if (xpt_bus_register(sim, ahd->dev_softc, /*bus_id*/0) != CAM_SUCCESS) {
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	if (xpt_create_path(&path, /*periph*/NULL,
			    cam_sim_path(sim), CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_bus_deregister(cam_sim_path(sim));
		cam_sim_free(sim, /*free_devq*/TRUE);
		sim = NULL;
		goto fail;
	}

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = ahd_async;
	csa.callback_arg = sim;
	xpt_action((union ccb *)&csa);
	count++;

fail:
	ahd->platform_data->sim = sim;
	ahd->platform_data->path = path;
	ahd_unlock(ahd);
	if (count != 0) {
		/* We have to wait until after any system dumps... */
		ahd->platform_data->eh =
		    EVENTHANDLER_REGISTER(shutdown_final, ahd_shutdown,
					  ahd, SHUTDOWN_PRI_DEFAULT);
		ahd_intr_enable(ahd, TRUE);
	}

	return (count);
}

/*
 * Catch an interrupt from the adapter
 */
void
ahd_platform_intr(void *arg)
{
	struct ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;
	ahd_lock(ahd);
	ahd_intr(ahd);
	ahd_unlock(ahd);
}

/*
 * We have an scb which has been processed by the
 * adapter; now we look to see how the operation
 * went.
 */
void
ahd_done(struct ahd_softc *ahd, struct scb *scb)
{
	union ccb *ccb;

	CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE,
		  ("ahd_done - scb %d\n", SCB_GET_TAG(scb)));

	ccb = scb->io_ctx;
	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_TIMEDOUT) != 0)
		LIST_REMOVE(scb, timedout_links);

	callout_stop(&scb->io_timer);

	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);
		bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
	}

#ifdef AHD_TARGET_MODE
	if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
		struct cam_path *ccb_path;

		/*
		 * If we have finally disconnected, clean up our
		 * pending device state.
		 * XXX - There may be error states that cause us
		 *       to remain connected.
		 */
		ccb_path = ccb->ccb_h.path;
		if (ahd->pending_device != NULL
		 && xpt_path_comp(ahd->pending_device->path, ccb_path) == 0) {

			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
				ahd->pending_device = NULL;
			} else {
				xpt_print_path(ccb->ccb_h.path);
				printf("Still disconnected\n");
				ahd_freeze_ccb(ccb);
			}
		}

		if (aic_get_transaction_status(scb) == CAM_REQ_INPROG)
			ccb->ccb_h.status |= CAM_REQ_CMP;
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
#endif

	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
		struct scb *list_scb;

		ahd->scb_data.recovery_scbs--;

		if (aic_get_transaction_status(scb) == CAM_BDR_SENT
		 || aic_get_transaction_status(scb) == CAM_REQ_ABORTED)
			aic_set_transaction_status(scb, CAM_CMD_TIMEOUT);

		if (ahd->scb_data.recovery_scbs == 0) {
			/*
			 * All recovery actions have completed successfully,
			 * so reinstate the timeouts for all other pending
			 * commands.
			 */
			LIST_FOREACH(list_scb,
				     &ahd->pending_scbs, pending_links) {

				aic_scb_timer_reset(list_scb,
						    aic_get_timeout(scb));
			}

			ahd_print_path(ahd, scb);
			printf("no longer in timeout, status = %x\n",
			       ccb->ccb_h.status);
		}
	}

	/* Don't clobber any existing error state */
	if (aic_get_transaction_status(scb) == CAM_REQ_INPROG) {
		ccb->ccb_h.status |= CAM_REQ_CMP;
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's csio.
		 */
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb),
		       /* XXX What size do we want to use??? */
		       sizeof(ccb->csio.sense_data)
		       - ccb->csio.sense_resid);
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else if ((scb->flags & SCB_PKT_SENSE) != 0) {
		struct scsi_status_iu_header *siu;
		u_int sense_len;

		/*
		 * Copy only the sense data into the provided buffer.
		 */
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		sense_len = MIN(scsi_4btoul(siu->sense_length),
				sizeof(ccb->csio.sense_data));
		memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data));
		memcpy(&ccb->csio.sense_data,
		       ahd_get_sense_buf(ahd, scb) + SIU_SENSE_OFFSET(siu),
		       sense_len);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			uint8_t *sense_data = (uint8_t *)&ccb->csio.sense_data;
			u_int i;

			printf("Copied %d bytes of sense data offset %d:",
			       sense_len, SIU_SENSE_OFFSET(siu));
			for (i = 0; i < sense_len; i++)
				printf(" 0x%x", *sense_data++);
			printf("\n");
		}
#endif
		scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID;
	}
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	ahd_free_scb(ahd, scb);
	xpt_done(ccb);
}

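/*
 * Main CAM entry point: dispatch an incoming CCB from the transport layer.
 * I/O and device-reset requests are bound to an SCB here; most other
 * function codes are serviced inline and completed immediately.
 */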
static void
ahd_action(struct cam_sim *sim, union ccb *ccb)
{
	struct ahd_softc *ahd;
#ifdef AHD_TARGET_MODE
	struct ahd_tmode_lstate *lstate;
#endif
	u_int target_id;
	u_int our_id;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahd_action\n"));

	ahd = (struct ahd_softc *)cam_sim_softc(sim);

	target_id = ccb->ccb_h.target_id;
	our_id = SIM_SCSI_ID(ahd, sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	{
		struct ahd_tmode_tstate *tstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				/* Response from the black hole device */
				tstate = NULL;
				lstate = ahd->black_hole;
			} else {
				ccb->ccb_h.status = status;
				xpt_done(ccb);
				break;
			}
		}
		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {

			SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h,
					  sim_links.sle);
			ccb->ccb_h.status = CAM_REQ_INPROG;
			if ((ahd->flags & AHD_TQINFIFO_BLOCKED) != 0)
				ahd_run_tqinfifo(ahd, /*paused*/FALSE);
			break;
		}

		/*
		 * The target_id represents the target we attempt to
		 * select.  In target mode, this is the initiator of
		 * the original command.
		 */
		our_id = target_id;
		target_id = ccb->csio.init_id;
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		struct scb *scb;
		struct hardware_scb *hscb;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		u_int col_idx;

		if ((ahd->flags & AHD_INITIATORROLE) == 0
		 && (ccb->ccb_h.func_code == XPT_SCSI_IO
		  || ccb->ccb_h.func_code == XPT_RESET_DEV)) {
			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
			xpt_done(ccb);
			return;
		}

		/*
		 * get an scb to use.
		 */
		tinfo = ahd_fetch_transinfo(ahd, 'A', our_id,
					    target_id, &tstate);
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0
		 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0
		 || ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			col_idx = AHD_NEVER_COL_IDX;
		} else {
			col_idx = AHD_BUILD_COL_IDX(target_id,
						    ccb->ccb_h.target_lun);
		}
		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {

			xpt_freeze_simq(sim, /*count*/1);
			ahd->flags |= AHD_RESOURCE_SHORTAGE;
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		hscb = scb->hscb;

		CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("start scb(%p)\n", scb));
		scb->io_ctx = ccb;
		/*
		 * So we can find the SCB when an abort is requested
		 */
		ccb->ccb_h.ccb_scb_ptr = scb;

		/*
		 * Put all the arguments for the xfer in the scb
		 */
		hscb->control = 0;
		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
		hscb->lun = ccb->ccb_h.target_lun;
		if (ccb->ccb_h.func_code == XPT_RESET_DEV) {
			hscb->cdb_len = 0;
			scb->flags |= SCB_DEVICE_RESET;
			hscb->control |= MK_MESSAGE;
			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
			ahd_execute_scb(scb, NULL, 0, 0);
		} else {
#ifdef AHD_TARGET_MODE
			if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
				struct target_data *tdata;

				tdata = &hscb->shared_data.tdata;
				if (ahd->pending_device == lstate)
					scb->flags |= SCB_TARGET_IMMEDIATE;
				hscb->control |= TARGET_SCB;
				tdata->target_phases = 0;
				if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
					tdata->target_phases |= SPHASE_PENDING;
					tdata->scsi_status =
					    ccb->csio.scsi_status;
				}
				if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT)
					tdata->target_phases |= NO_DISCONNECT;

				tdata->initiator_tag =
				    ahd_htole16(ccb->csio.tag_id);
			}
#endif
			hscb->task_management = 0;
			if (ccb->ccb_h.flags & CAM_TAG_ACTION_VALID)
				hscb->control |= ccb->csio.tag_action;

			ahd_setup_data(ahd, sim, &ccb->csio, scb);
		}
		break;
	}
#ifdef AHD_TARGET_MODE
	case XPT_NOTIFY_ACK:
	case XPT_IMMED_NOTIFY:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			xpt_done(ccb);
			break;
		}
		SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h,
				  sim_links.sle);
		ccb->ccb_h.status = CAM_REQ_INPROG;
		ahd_send_lstate_events(ahd, lstate);
		break;
	}
	case XPT_EN_LUN:		/* Enable LUN as a target */
		ahd_handle_en_lun(ahd, sim, ccb);
		xpt_done(ccb);
		break;
#endif
	case XPT_ABORT:			/* Abort the specified CCB */
	{
		ahd_abort_ccb(ahd, sim, ccb);
		break;
	}
	case XPT_SET_TRAN_SETTINGS:
	{
		ahd_set_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		ahd_get_tran_settings(ahd, SIM_SCSI_ID(ahd, sim),
				      SIM_CHANNEL(ahd, sim), &ccb->cts);
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		aic_calc_geometry(&ccb->ccg, ahd->flags & AHD_EXTENDED_TRANS_A);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int found;

		found = ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
					  /*initiate reset*/TRUE);
		if (bootverbose) {
			xpt_print_path(SIM_PATH(ahd, sim));
			printf("SCSI bus reset delivered. "
			       "%d SCBs aborted.\n", found);
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		if ((ahd->features & AHD_WIDE) != 0)
			cpi->hba_inquiry |= PI_WIDE_16;
		if ((ahd->features & AHD_TARGETMODE) != 0) {
			cpi->target_sprt = PIT_PROCESSOR
					 | PIT_DISCONNECT
					 | PIT_TERM_IO;
		} else {
			cpi->target_sprt = 0;
		}
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = (ahd->features & AHD_WIDE) ? 15 : 7;
		cpi->max_lun = AHD_NUM_LUNS_NONPKT - 1;
		cpi->initiator_id = ahd->our_id;
		if ((ahd->flags & AHD_RESET_BUS_A) == 0) {
			cpi->hba_misc |= PIM_NOBUSRESET;
		}
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 4;
		cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_DT_ST
						    | SID_SPI_IUS
						    | SID_SPI_QAS;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
		xpt_done(ccb);
		break;
	}
}

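/*
 * Service an XPT_SET_TRAN_SETTINGS request: fold the caller's disconnect,
 * tag queueing, bus width, and sync/PPR parameters into either the
 * per-target goal settings or the user settings, clamping them to what
 * the controller supports.
 */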
static void
ahd_set_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	uint16_t *discenable;
	uint16_t *tagenable;
	u_int update_type;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    SIM_CHANNEL(ahd, sim),
			    ROLE_UNKNOWN);
	tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
				    devinfo.our_scsiid,
				    devinfo.target, &tstate);
	update_type = 0;
	if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
		update_type |= AHD_TRANS_GOAL;
		discenable = &tstate->discenable;
		tagenable = &tstate->tagenable;
		tinfo->curr.protocol_version = cts->protocol_version;
		tinfo->curr.transport_version = cts->transport_version;
		tinfo->goal.protocol_version = cts->protocol_version;
		tinfo->goal.transport_version = cts->transport_version;
	} else if (cts->type == CTS_TYPE_USER_SETTINGS) {
		update_type |= AHD_TRANS_USER;
		discenable = &ahd->user_discenable;
		tagenable = &ahd->user_tagenable;
		tinfo->user.protocol_version = cts->protocol_version;
		tinfo->user.transport_version = cts->transport_version;
	} else {
		cts->ccb_h.status = CAM_REQ_INVALID;
		return;
	}

	if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
			*discenable |= devinfo.target_mask;
		else
			*discenable &= ~devinfo.target_mask;
	}

	if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
			*tagenable |= devinfo.target_mask;
		else
			*tagenable &= ~devinfo.target_mask;
	}

	if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
		ahd_validate_width(ahd, /*tinfo limit*/NULL,
				   &spi->bus_width, ROLE_UNKNOWN);
		ahd_set_width(ahd, &devinfo, spi->bus_width,
			      update_type, /*paused*/FALSE);
	}

	if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->ppr_options = tinfo->user.ppr_options;
		else
			spi->ppr_options = tinfo->goal.ppr_options;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_offset = tinfo->user.offset;
		else
			spi->sync_offset = tinfo->goal.offset;
	}

	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (update_type == AHD_TRANS_USER)
			spi->sync_period = tinfo->user.period;
		else
			spi->sync_period = tinfo->goal.period;
	}

	if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
	 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
		u_int maxsync;

		maxsync = AHD_SYNCRATE_MAX;

		if (spi->bus_width != MSG_EXT_WDTR_BUS_16_BIT)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((*discenable & devinfo.target_mask) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		ahd_find_syncrate(ahd, &spi->sync_period,
				  &spi->ppr_options, maxsync);
		ahd_validate_offset(ahd, /*tinfo limit*/NULL,
				    spi->sync_period, &spi->sync_offset,
				    spi->bus_width, ROLE_UNKNOWN);

		/* We use a period of 0 to represent async */
		if (spi->sync_offset == 0) {
			spi->sync_period = 0;
			spi->ppr_options = 0;
		}

		ahd_set_syncrate(ahd, &devinfo, spi->sync_period,
				 spi->sync_offset, spi->ppr_options,
				 update_type, /*paused*/FALSE);
	}
	cts->ccb_h.status = CAM_REQ_CMP;
}

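/*
 * Service an XPT_GET_TRAN_SETTINGS request: report either the currently
 * negotiated or the user-configured transfer parameters for the target
 * addressed by the CCB.
 */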
static void
ahd_get_tran_settings(struct ahd_softc *ahd, int our_id, char channel,
		      struct ccb_trans_settings *cts)
{
	struct ahd_devinfo devinfo;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_spi *spi;
	struct ahd_initiator_tinfo *targ_info;
	struct ahd_tmode_tstate *tstate;
	struct ahd_transinfo *tinfo;

	scsi = &cts->proto_specific.scsi;
	spi = &cts->xport_specific.spi;
	ahd_compile_devinfo(&devinfo, our_id,
			    cts->ccb_h.target_id,
			    cts->ccb_h.target_lun,
			    channel, ROLE_UNKNOWN);
	targ_info = ahd_fetch_transinfo(ahd, devinfo.channel,
					devinfo.our_scsiid,
					devinfo.target, &tstate);

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
		tinfo = &targ_info->curr;
	else
		tinfo = &targ_info->user;

	scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
	if (cts->type == CTS_TYPE_USER_SETTINGS) {
		if ((ahd->user_discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((ahd->user_tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	} else {
		if ((tstate->discenable & devinfo.target_mask) != 0)
			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

		if ((tstate->tagenable & devinfo.target_mask) != 0)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
	}
	cts->protocol_version = tinfo->protocol_version;
	cts->transport_version = tinfo->transport_version;

	spi->sync_period = tinfo->period;
	spi->sync_offset = tinfo->offset;
	spi->bus_width = tinfo->width;
	spi->ppr_options = tinfo->ppr_options;

	cts->protocol = PROTO_SCSI;
	cts->transport = XPORT_SPI;
	spi->valid = CTS_SPI_VALID_SYNC_RATE
		   | CTS_SPI_VALID_SYNC_OFFSET
		   | CTS_SPI_VALID_BUS_WIDTH
		   | CTS_SPI_VALID_PPR_OPTIONS;

	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
		scsi->valid = CTS_SCSI_VALID_TQ;
		spi->valid |= CTS_SPI_VALID_DISC;
	} else {
		scsi->valid = 0;
	}

	cts->ccb_h.status = CAM_REQ_CMP;
}

static void
ahd_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg)
{
	struct ahd_softc *ahd;
	struct cam_sim *sim;

	sim = (struct cam_sim *)callback_arg;
	ahd = (struct ahd_softc *)cam_sim_softc(sim);
	switch (code) {
	case AC_LOST_DEVICE:
	{
		struct ahd_devinfo devinfo;

		ahd_compile_devinfo(&devinfo, SIM_SCSI_ID(ahd, sim),
				    xpt_path_target_id(path),
				    xpt_path_lun_id(path),
				    SIM_CHANNEL(ahd, sim),
				    ROLE_UNKNOWN);

		/*
		 * Revert to async/narrow transfers
		 * for the next device.
		 */
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_GOAL|AHD_TRANS_CUR, /*paused*/FALSE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0, AHD_TRANS_GOAL|AHD_TRANS_CUR,
				 /*paused*/FALSE);
		break;
	}
	default:
		break;
	}
}

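/*
 * bus_dma callback for data buffer mapping.  Build the hardware S/G list
 * from the returned segments, perform the final abort check, and either
 * queue the SCB to the controller or, for immediate target-mode SCBs,
 * hand it to the sequencer's message loop.
 */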
static void
ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
		int error)
{
	struct scb *scb;
	union ccb *ccb;
	struct ahd_softc *ahd;
	struct ahd_initiator_tinfo *tinfo;
	struct ahd_tmode_tstate *tstate;
	u_int mask;

	scb = (struct scb *)arg;
	ccb = scb->io_ctx;
	ahd = scb->ahd_softc;

	if (error != 0) {
		if (error == EFBIG)
			aic_set_transaction_status(scb, CAM_REQ_TOO_BIG);
		else
			aic_set_transaction_status(scb, CAM_REQ_CMP_ERR);
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat, scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}
	scb->sg_count = 0;
	if (nsegments != 0) {
		void *sg;
		bus_dmasync_op_t op;
		u_int i;

		/* Copy the segments into our SG list */
		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {

			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
					  dm_segs->ds_len,
					  /*last*/i == 1);
			dm_segs++;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahd->buffer_dmat, scb->dmamap, op);

		if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
			struct target_data *tdata;

			tdata = &scb->hscb->shared_data.tdata;
			tdata->target_phases |= DPHASE_PENDING;
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
				tdata->data_phase = P_DATAOUT;
			else
				tdata->data_phase = P_DATAIN;
		}
	}

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (aic_get_transaction_status(scb) != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(ahd->buffer_dmat,
					  scb->dmamap);
		ahd_free_scb(ahd, scb);
		xpt_done(ccb);
		return;
	}

	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahd, scb);

	if ((tstate->discenable & mask) != 0
	 && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
		scb->hscb->control |= DISCENB;

	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
		scb->flags |= SCB_PACKETIZED;
		if (scb->hscb->task_management != 0)
			scb->hscb->control &= ~MK_MESSAGE;
	}

	if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
	 && (tinfo->goal.width != 0
	  || tinfo->goal.period != 0
	  || tinfo->goal.ppr_options != 0)) {
		scb->flags |= SCB_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	} else if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	aic_scb_timer_start(scb);

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
		ahd_pause(ahd);
		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
		ahd_unpause(ahd);
	} else {
		ahd_queue_scb(ahd, scb);
	}

}

static void
ahd_poll(struct cam_sim *sim)
{
	ahd_intr(cam_sim_softc(sim));
}

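/*
 * Translate the CCB's CDB and data descriptors into the hardware SCB and
 * kick off DMA mapping of the data buffer; ahd_execute_scb() finishes the
 * submission once the mapping completes.
 */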
static void
ahd_setup_data(struct ahd_softc *ahd, struct cam_sim *sim,
	       struct ccb_scsiio *csio, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct ccb_hdr *ccb_h;

	hscb = scb->hscb;
	ccb_h = &csio->ccb_h;

	csio->resid = 0;
	csio->sense_resid = 0;
	if (ccb_h->func_code == XPT_SCSI_IO) {
		hscb->cdb_len = csio->cdb_len;
		if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

			if (hscb->cdb_len > MAX_CDB_LEN
			 && (ccb_h->flags & CAM_CDB_PHYS) == 0) {

				/*
				 * Should CAM start to support CDB sizes
				 * greater than 16 bytes, we could use
				 * the sense buffer to store the CDB.
				 */
				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			if ((ccb_h->flags & CAM_CDB_PHYS) != 0) {
				hscb->shared_data.idata.cdb_from_host.cdbptr =
				    aic_htole64((uintptr_t)csio->cdb_io.cdb_ptr);
				hscb->shared_data.idata.cdb_from_host.cdblen =
				    csio->cdb_len;
				hscb->cdb_len |= SCB_CDB_LEN_PTR;
			} else {
				memcpy(hscb->shared_data.idata.cdb,
				       csio->cdb_io.cdb_ptr,
				       hscb->cdb_len);
			}
		} else {
			if (hscb->cdb_len > MAX_CDB_LEN) {

				aic_set_transaction_status(scb,
							   CAM_REQ_INVALID);
				ahd_free_scb(ahd, scb);
				xpt_done((union ccb *)csio);
				return;
			}
			memcpy(hscb->shared_data.idata.cdb,
			       csio->cdb_io.cdb_bytes, hscb->cdb_len);
		}
	}

	/* Only use S/G if there is a transfer */
	if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
			/* We've been given a pointer to a single buffer */
			if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
				int s;
				int error;

				s = splsoftvm();
				error = bus_dmamap_load(ahd->buffer_dmat,
							scb->dmamap,
							csio->data_ptr,
							csio->dxfer_len,
							ahd_execute_scb,
							scb, /*flags*/0);
				if (error == EINPROGRESS) {
					/*
					 * So as to maintain ordering,
					 * freeze the controller queue
					 * until our mapping is
					 * returned.
					 */
					xpt_freeze_simq(sim,
							/*count*/1);
					scb->io_ctx->ccb_h.status |=
					    CAM_RELEASE_SIMQ;
				}
				splx(s);
			} else {
				struct bus_dma_segment seg;

				/* Pointer to physical buffer */
				if (csio->dxfer_len > AHD_MAXTRANSFER_SIZE)
					panic("ahd_setup_data - Transfer size "
					      "larger than device max");

				seg.ds_addr =
				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
				seg.ds_len = csio->dxfer_len;
				ahd_execute_scb(scb, &seg, 1, 0);
			}
		} else {
			struct bus_dma_segment *segs;

			if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
				panic("ahd_setup_data - Physical segment "
				      "pointers unsupported");

			if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
				panic("ahd_setup_data - Virtual segment "
				      "addresses unsupported");

			/* Just use the segments provided */
			segs = (struct bus_dma_segment *)csio->data_ptr;
			ahd_execute_scb(scb, segs, csio->sglist_cnt, 0);
		}
	} else {
		ahd_execute_scb(scb, NULL, 0, 0);
	}
}

static void
ahd_abort_ccb(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
{
	union ccb *abort_ccb;

	abort_ccb = ccb->cab.abort_ccb;
	switch (abort_ccb->ccb_h.func_code) {
#ifdef AHD_TARGET_MODE
	case XPT_ACCEPT_TARGET_IO:
	case XPT_IMMED_NOTIFY:
	case XPT_CONT_TARGET_IO:
	{
		struct ahd_tmode_tstate *tstate;
		struct ahd_tmode_lstate *lstate;
		struct ccb_hdr_slist *list;
		cam_status status;

		status = ahd_find_tmode_devs(ahd, sim, abort_ccb, &tstate,
					     &lstate, TRUE);

		if (status != CAM_REQ_CMP) {
			ccb->ccb_h.status = status;
			break;
		}

		if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
			list = &lstate->accept_tios;
		else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
			list = &lstate->immed_notifies;
		else
			list = NULL;

		if (list != NULL) {
			struct ccb_hdr *curelm;
			int found;

			curelm = SLIST_FIRST(list);
			found = 0;
			if (curelm == &abort_ccb->ccb_h) {
				found = 1;
				SLIST_REMOVE_HEAD(list, sim_links.sle);
			} else {
				while (curelm != NULL) {
					struct ccb_hdr *nextelm;

					nextelm =
					    SLIST_NEXT(curelm, sim_links.sle);

					if (nextelm == &abort_ccb->ccb_h) {
						found = 1;
						SLIST_NEXT(curelm,
							   sim_links.sle) =
						    SLIST_NEXT(nextelm,
							       sim_links.sle);
						break;
					}
					curelm = nextelm;
				}
			}

			if (found) {
				abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
				xpt_done(abort_ccb);
				ccb->ccb_h.status = CAM_REQ_CMP;
			} else {
				xpt_print_path(abort_ccb->ccb_h.path);
				printf("Not found\n");
				ccb->ccb_h.status = CAM_PATH_INVALID;
			}
			break;
		}
		/* FALLTHROUGH */
	}
#endif
	case XPT_SCSI_IO:
		/* XXX Fully implement the hard ones */
		ccb->ccb_h.status = CAM_UA_ABORT;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

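/*
 * Notify CAM of an asynchronous event (transfer negotiation update, bus
 * device reset, or bus reset) on the given target/lun.
 */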
void
ahd_send_async(struct ahd_softc *ahd, char channel, u_int target,
	       u_int lun, ac_code code, void *opt_arg)
{
	struct ccb_trans_settings cts;
	struct cam_path *path;
	void *arg;
	int error;

	arg = NULL;
	error = ahd_create_path(ahd, channel, target, lun, &path);

	if (error != CAM_REQ_CMP)
		return;

	switch (code) {
	case AC_TRANSFER_NEG:
	{
		struct ccb_trans_settings_scsi *scsi;

		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		scsi = &cts.proto_specific.scsi;
		cts.ccb_h.path = path;
		cts.ccb_h.target_id = target;
		cts.ccb_h.target_lun = lun;
		ahd_get_tran_settings(ahd, ahd->our_id, channel, &cts);
		arg = &cts;
		scsi->valid &= ~CTS_SCSI_VALID_TQ;
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		if (opt_arg == NULL)
			break;
		if (*((ahd_queue_alg *)opt_arg) == AHD_QUEUE_TAGGED)
			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		scsi->valid |= CTS_SCSI_VALID_TQ;
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		break;
	default:
		panic("ahd_send_async: Unexpected async event");
	}
	xpt_async(code, path, arg);
	xpt_free_path(path);
}

void
ahd_platform_set_tags(struct ahd_softc *ahd,
		      struct ahd_devinfo *devinfo, int enable)
{
}

int
ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
{
	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
	if (ahd->platform_data == NULL)
		return (ENOMEM);
	return (0);
}

void
ahd_platform_free(struct ahd_softc *ahd)
{
	struct ahd_platform_data *pdata;

	pdata = ahd->platform_data;
	if (pdata != NULL) {
		if (pdata->regs[0] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[0],
					     pdata->regs_res_id[0],
					     pdata->regs[0]);

		if (pdata->regs[1] != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->regs_res_type[1],
					     pdata->regs_res_id[1],
					     pdata->regs[1]);

		if (pdata->irq != NULL)
			bus_release_resource(ahd->dev_softc,
					     pdata->irq_res_type,
					     0, pdata->irq);

		if (pdata->sim != NULL) {
			xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
			xpt_free_path(pdata->path);
			xpt_bus_deregister(cam_sim_path(pdata->sim));
			cam_sim_free(pdata->sim, /*free_devq*/TRUE);
		}
		if (pdata->eh != NULL)
			EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
		free(ahd->platform_data, M_DEVBUF);
	}
}

int
ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
{
	/* We don't sort softcs under FreeBSD so report equal always */
	return (0);
}

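/*
 * Newbus detach method: remove the controller from the global list,
 * disable and tear down its interrupt handler, and release the remaining
 * resources via ahd_free().
 */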
int
ahd_detach(device_t dev)
{
	struct ahd_softc *ahd;

	device_printf(dev, "detaching device\n");
	ahd = device_get_softc(dev);
	ahd_lock(ahd);
	TAILQ_REMOVE(&ahd_tailq, ahd, links);
	ahd_intr_enable(ahd, FALSE);
	bus_teardown_intr(dev, ahd->platform_data->irq, ahd->platform_data->ih);
	ahd_unlock(ahd);
	ahd_free(ahd);
	return (0);
}

#if 0
static void
ahd_dump_targcmd(struct target_cmd *cmd)
{
	uint8_t *byte;
	uint8_t *last_byte;
	int i;

	byte = &cmd->initiator_channel;
	/* Debugging info for received commands */
	last_byte = &cmd[1].initiator_channel;

	i = 0;
	while (byte < last_byte) {
		if (i == 0)
			printf("\t");
		printf("%#x", *byte++);
		i++;
		if (i == 8) {
			printf("\n");
			i = 0;
		} else {
			printf(", ");
		}
	}
}
#endif

static int
ahd_modevent(module_t mod, int type, void *data)
{
	/* XXX Deal with busy status on unload. */
	/* XXX Deal with unknown events */
	return 0;
}

static moduledata_t ahd_mod = {
	"ahd",
	ahd_modevent,
	NULL
};

/********************************** DDB Hooks *********************************/
#ifdef DDB
static struct ahd_softc *ahd_ddb_softc;
static int ahd_ddb_paused;
static int ahd_ddb_paused_on_entry;
DB_COMMAND(ahd_sunit, ahd_ddb_sunit)
{
	struct ahd_softc *list_ahd;

	ahd_ddb_softc = NULL;
	TAILQ_FOREACH(list_ahd, &ahd_tailq, links) {
		if (list_ahd->unit == addr)
			ahd_ddb_softc = list_ahd;
	}
	if (ahd_ddb_softc == NULL)
		db_error("No matching softc found!\n");
}

DB_COMMAND(ahd_pause, ahd_ddb_pause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused == 0) {
		ahd_ddb_paused++;
		if (ahd_is_paused(ahd_ddb_softc)) {
			ahd_ddb_paused_on_entry++;
			return;
		}
		ahd_pause(ahd_ddb_softc);
	}
}

DB_COMMAND(ahd_unpause, ahd_ddb_unpause)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (ahd_ddb_paused != 0) {
		ahd_ddb_paused = 0;
		if (ahd_ddb_paused_on_entry)
			return;
		ahd_unpause(ahd_ddb_softc);
	} else if (ahd_ddb_paused_on_entry != 0) {
		/* Two unpauses to clear a paused on entry. */
		ahd_ddb_paused_on_entry = 0;
		ahd_unpause(ahd_ddb_softc);
	}
}

DB_COMMAND(ahd_in, ahd_ddb_in)
{
	int c;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	if (have_addr == 0)
		return;

	size = 1;
	while ((c = *modif++) != '\0') {
		switch (c) {
		case 'b':
			size = 1;
			break;
		case 'w':
			size = 2;
			break;
		case 'l':
			size = 4;
			break;
		}
	}

	if (count <= 0)
		count = 1;
	while (--count >= 0) {
		db_printf("%04lx (M)%x: \t", (u_long)addr,
			  ahd_inb(ahd_ddb_softc, MODE_PTR));
		switch (size) {
		case 1:
			db_printf("%02x\n", ahd_inb(ahd_ddb_softc, addr));
			break;
		case 2:
			db_printf("%04x\n", ahd_inw(ahd_ddb_softc, addr));
			break;
		case 4:
			db_printf("%08x\n", ahd_inl(ahd_ddb_softc, addr));
			break;
		}
	}
}

DB_FUNC(ahd_out, ahd_ddb_out, db_cmd_table, CS_MORE, NULL)
{
	db_expr_t old_value;
	db_expr_t new_value;
	int size;

	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}

	switch (modif[0]) {
	case '\0':
	case 'b':
		size = 1;
		break;
	case 'h':
		size = 2;
		break;
	case 'l':
		size = 4;
		break;
	default:
		db_error("Unknown size\n");
		return;
	}

	while (db_expression(&new_value)) {
		switch (size) {
		default:
		case 1:
			old_value = ahd_inb(ahd_ddb_softc, addr);
			ahd_outb(ahd_ddb_softc, addr, new_value);
			break;
		case 2:
			old_value = ahd_inw(ahd_ddb_softc, addr);
			ahd_outw(ahd_ddb_softc, addr, new_value);
			break;
		case 4:
			old_value = ahd_inl(ahd_ddb_softc, addr);
			ahd_outl(ahd_ddb_softc, addr, new_value);
			break;
		}
		db_printf("%04lx (M)%x: \t0x%lx\t=\t0x%lx",
			  (u_long)addr, ahd_inb(ahd_ddb_softc, MODE_PTR),
			  (u_long)old_value, (u_long)new_value);
		addr += size;
	}
	db_skip_to_eol();
}

DB_COMMAND(ahd_dump, ahd_ddb_dump)
{
	if (ahd_ddb_softc == NULL) {
		db_error("Must set unit with ahd_sunit first!\n");
		return;
	}
	ahd_dump_card_state(ahd_ddb_softc);
}

#endif

DECLARE_MODULE(ahd, ahd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_DEPEND(ahd, cam, 1, 1, 1);
MODULE_VERSION(ahd, 1);