1 /*- 2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/bus.h> 32 #include <sys/endian.h> 33 #include <sys/systm.h> 34 #include <sys/types.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/time.h> 38 #include <sys/conf.h> 39 #include <sys/fcntl.h> 40 #include <sys/md5.h> 41 #include <sys/interrupt.h> 42 #include <sys/sbuf.h> 43 44 #include <sys/lock.h> 45 #include <sys/mutex.h> 46 #include <sys/sysctl.h> 47 48 #ifdef PC98 49 #include <pc98/pc98/pc98_machdep.h> /* geometry translation */ 50 #endif 51 52 #include <cam/cam.h> 53 #include <cam/cam_ccb.h> 54 #include <cam/cam_queue.h> 55 #include <cam/cam_periph.h> 56 #include <cam/cam_sim.h> 57 #include <cam/cam_xpt.h> 58 #include <cam/cam_xpt_sim.h> 59 #include <cam/cam_xpt_periph.h> 60 #include <cam/cam_xpt_internal.h> 61 #include <cam/cam_debug.h> 62 63 #include <cam/scsi/scsi_all.h> 64 #include <cam/scsi/scsi_message.h> 65 #include <cam/scsi/scsi_pass.h> 66 #include <cam/ata/ata_all.h> 67 #include <machine/stdarg.h> /* for xpt_print below */ 68 #include "opt_cam.h" 69 70 struct scsi_quirk_entry { 71 struct scsi_inquiry_pattern inq_pat; 72 u_int8_t quirks; 73 #define CAM_QUIRK_NOLUNS 0x01 74 #define CAM_QUIRK_NOSERIAL 0x02 75 #define CAM_QUIRK_HILUNS 0x04 76 #define CAM_QUIRK_NOHILUNS 0x08 77 u_int mintags; 78 u_int maxtags; 79 }; 80 #define SCSI_QUIRK(dev) ((struct scsi_quirk_entry *)((dev)->quirk)) 81 82 static periph_init_t probe_periph_init; 83 84 static struct periph_driver probe_driver = 85 { 86 probe_periph_init, "aprobe", 87 TAILQ_HEAD_INITIALIZER(probe_driver.units) 88 }; 89 90 PERIPHDRIVER_DECLARE(aprobe, probe_driver); 91 92 typedef enum { 93 PROBE_RESET, 94 PROBE_IDENTIFY, 95 PROBE_SETMODE, 96 PROBE_INQUIRY, 97 PROBE_FULL_INQUIRY, 98 PROBE_PM_PID, 99 PROBE_PM_PRV, 100 PROBE_PM_PORTS, 101 PROBE_PM_RESET, 102 PROBE_PM_CONNECT, 103 PROBE_PM_CHECK, 104 PROBE_PM_CLEAR, 105 PROBE_INVALID 106 } probe_action; 107 108 static char 
*probe_action_text[] = { 109 "PROBE_RESET", 110 "PROBE_IDENTIFY", 111 "PROBE_SETMODE", 112 "PROBE_INQUIRY", 113 "PROBE_FULL_INQUIRY", 114 "PROBE_PM_PID", 115 "PROBE_PM_PRV", 116 "PROBE_PM_PORTS", 117 "PROBE_PM_RESET", 118 "PROBE_PM_CONNECT", 119 "PROBE_PM_CHECK", 120 "PROBE_PM_CLEAR", 121 "PROBE_INVALID" 122 }; 123 124 #define PROBE_SET_ACTION(softc, newaction) \ 125 do { \ 126 char **text; \ 127 text = probe_action_text; \ 128 CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO, \ 129 ("Probe %s to %s\n", text[(softc)->action], \ 130 text[(newaction)])); \ 131 (softc)->action = (newaction); \ 132 } while(0) 133 134 typedef enum { 135 PROBE_NO_ANNOUNCE = 0x04 136 } probe_flags; 137 138 typedef struct { 139 TAILQ_HEAD(, ccb_hdr) request_ccbs; 140 probe_action action; 141 union ccb saved_ccb; 142 probe_flags flags; 143 u_int8_t digest[16]; 144 uint32_t pm_pid; 145 uint32_t pm_prv; 146 int pm_ports; 147 int pm_step; 148 int pm_try; 149 struct cam_periph *periph; 150 } probe_softc; 151 152 static struct scsi_quirk_entry scsi_quirk_table[] = 153 { 154 { 155 /* Default tagged queuing parameters for all devices */ 156 { 157 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, 158 /*vendor*/"*", /*product*/"*", /*revision*/"*" 159 }, 160 /*quirks*/0, /*mintags*/2, /*maxtags*/32 161 }, 162 }; 163 164 static const int scsi_quirk_table_size = 165 sizeof(scsi_quirk_table) / sizeof(*scsi_quirk_table); 166 167 static cam_status proberegister(struct cam_periph *periph, 168 void *arg); 169 static void probeschedule(struct cam_periph *probe_periph); 170 static void probestart(struct cam_periph *periph, union ccb *start_ccb); 171 //static void proberequestdefaultnegotiation(struct cam_periph *periph); 172 //static int proberequestbackoff(struct cam_periph *periph, 173 // struct cam_ed *device); 174 static void probedone(struct cam_periph *periph, union ccb *done_ccb); 175 static void probecleanup(struct cam_periph *periph); 176 static void scsi_find_quirk(struct cam_ed *device); 177 static void 
		ata_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	ata_scan_lun(struct cam_periph *periph,
			    struct cam_path *path, cam_flags flags,
			    union ccb *ccb);
static void	xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static struct cam_ed *
		ata_alloc_device(struct cam_eb *bus, struct cam_et *target,
				 lun_id_t lun_id);
static void	ata_device_transport(struct cam_path *path);
static void	scsi_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	scsi_toggle_tags(struct cam_path *path);
static void	ata_dev_async(u_int32_t async_code,
			      struct cam_eb *bus,
			      struct cam_et *target,
			      struct cam_ed *device,
			      void *async_arg);
static void	ata_action(union ccb *start_ccb);

/*
 * Transport operations vector handed to the XPT core for ATA buses;
 * fetched by the SIM glue through ata_get_xport().
 */
static struct xpt_xport ata_xport = {
	.alloc_device = ata_alloc_device,
	.action = ata_action,
	.async = ata_dev_async,
};

struct xpt_xport *
ata_get_xport(void)
{
	return (&ata_xport);
}

/* Nothing to initialize when the aprobe periph driver is registered. */
static void
probe_periph_init()
{
}

/*
 * Periph constructor for the "aprobe" driver.
 *
 * 'arg' is the XPT probe request CCB.  Allocate the probe_softc, queue the
 * request CCB on it, acquire a reference on the periph and kick off the
 * probe state machine via probeschedule().  Returns CAM_REQ_CMP on
 * success, CAM_REQ_CMP_ERR on bad arguments or allocation failure.
 */
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		printf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		printf("proberegister: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	/* M_NOWAIT: constructor may be called in a non-sleepable context. */
	softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);

	if (softc == NULL) {
		printf("proberegister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
	    periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = PROBE_INVALID;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}


	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}

/*
 * Pick the first probe state for the device at the head of the request
 * queue and schedule the periph so probestart() gets called with a CCB.
 *
 * Unconfigured devices start from PROBE_RESET; a known SATA port
 * multiplier starts from PROBE_PM_PID; everything else goes straight to
 * PROBE_IDENTIFY.
 */
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (periph->path->device->flags & CAM_DEV_UNCONFIGURED)
		PROBE_SET_ACTION(softc, PROBE_RESET);
	else if (periph->path->device->protocol == PROTO_SATAPM)
		PROBE_SET_ACTION(softc, PROBE_PM_PID);
	else
		PROBE_SET_ACTION(softc, PROBE_IDENTIFY);

	/* Suppress the found-device announcement on an expected rescan. */
	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}

/*
 * Start callback for the probe periph: fill 'start_ccb' with the command
 * matching the current state (softc->action) and submit it; probedone()
 * handles the completion and advances the state machine.
 */
static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_ataio *ataio;
	struct ccb_scsiio *csio;
	struct ccb_trans_settings cts;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	ataio = &start_ccb->ataio;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_RESET:
		if (start_ccb->ccb_h.target_id == 15) {
			/* Report SIM that we have no knowledge about PM presence. */
			bzero(&cts, sizeof(cts));
			xpt_setup_ccb(&cts.ccb_h, start_ccb->ccb_h.path, 1);
			cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
			cts.type = CTS_TYPE_CURRENT_SETTINGS;
			cts.xport_specific.sata.pm_present = 0;
			cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
			xpt_action((union ccb *)&cts);
		}
		/* 3s timeout when resetting the PM slot (15), 15s otherwise. */
		cam_fill_ataio(ataio,
		      0,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      (start_ccb->ccb_h.target_id == 15 ? 3 : 15) * 1000);
		ata_reset_cmd(ataio);
		break;
	case PROBE_IDENTIFY:
	{
		struct ata_params *ident_buf =
		    &periph->path->device->ident_data;

		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
			/*
			 * Prepare check that it is the same device.
			 * probedone() re-computes this digest from the fresh
			 * IDENTIFY data and compares.
			 */
			MD5_CTX context;

			MD5Init(&context);
			MD5Update(&context,
			    (unsigned char *)ident_buf->model,
			    sizeof(ident_buf->model));
			MD5Update(&context,
			    (unsigned char *)ident_buf->revision,
			    sizeof(ident_buf->revision));
			MD5Update(&context,
			    (unsigned char *)ident_buf->serial,
			    sizeof(ident_buf->serial));
			MD5Final(softc->digest, &context);
		}
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_IN,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/(u_int8_t *)ident_buf,
		      /*dxfer_len*/sizeof(struct ata_params),
		      30 * 1000);
		/* ATAPI devices answer a different IDENTIFY opcode. */
		if (periph->path->device->protocol == PROTO_ATA)
			ata_36bit_cmd(ataio, ATA_ATA_IDENTIFY, 0, 0, 0);
		else
			ata_36bit_cmd(ataio, ATA_ATAPI_IDENTIFY, 0, 0, 0);
		break;
	}
	case PROBE_SETMODE:
	{
		struct ata_params *ident_buf =
		    &periph->path->device->ident_data;

		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      0,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      30 * 1000);
		/* Request the best transfer mode the IDENTIFY data allows. */
		ata_36bit_cmd(ataio, ATA_SETFEATURES, ATA_SF_SETXFER, 0,
		    ata_max_mode(ident_buf, ATA_UDMA6, ATA_UDMA6));
		break;
	}
	case PROBE_INQUIRY:
	case PROBE_FULL_INQUIRY:
	{
		u_int inquiry_len;
		struct scsi_inquiry_data *inq_buf =
		    &periph->path->device->inq_data;

		if (softc->action == PROBE_INQUIRY)
			inquiry_len = SHORT_INQUIRY_LENGTH;
		else
			inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
		/*
		 * Some parallel SCSI devices fail to send an
		 * ignore wide residue message when dealing with
		 * odd length inquiry requests.  Round up to be
		 * safe.
		 */
		inquiry_len = roundup2(inquiry_len, 2);
		scsi_inquiry(csio,
			     /*retries*/1,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     inquiry_len,
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	/*
	 * PM GSCR register reads: probedone() decodes register 0 as the
	 * product ID, 1 as the revision and 2 as the port count.
	 */
	case PROBE_PM_PID:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_read_cmd(ataio, 0, 15);
		break;
	case PROBE_PM_PRV:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_read_cmd(ataio, 1, 15);
		break;
	case PROBE_PM_PORTS:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_read_cmd(ataio, 2, 15);
		break;
	case PROBE_PM_RESET:
	{
		/*
		 * ident_buf->cylinders is (re)used here as a bitmask of PM
		 * ports with a device attached (maintained by
		 * PROBE_PM_CHECK in probedone).
		 */
		struct ata_params *ident_buf =
		    &periph->path->device->ident_data;
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_write_cmd(ataio, 2, softc->pm_step,
		    (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
		printf("PM RESET %d %04x %d\n", softc->pm_step, ident_buf->cylinders,
		    (ident_buf->cylinders & (1 << softc->pm_step)) ? 0 : 1);
		break;
	}
	case PROBE_PM_CONNECT:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_write_cmd(ataio, 2, softc->pm_step, 0);
		break;
	case PROBE_PM_CHECK:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_read_cmd(ataio, 0, softc->pm_step);
		break;
	case PROBE_PM_CLEAR:
		cam_fill_ataio(ataio,
		      1,
		      probedone,
		      /*flags*/CAM_DIR_NONE,
		      MSG_SIMPLE_Q_TAG,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      10 * 1000);
		ata_pm_write_cmd(ataio, 1, softc->pm_step, 0xFFFFFFFF);
		break;
	case PROBE_INVALID:
		CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO,
		    ("probestart: invalid action state\n"));
		/* FALLTHROUGH */
	default:
		break;
	}
	xpt_action(start_ccb);
}
#if 0
static void
proberequestdefaultnegotiation(struct cam_periph *periph)
{
	struct ccb_trans_settings cts;

	xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_USER_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		return;
	}
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
}

/*
 * Backoff Negotiation Code- only pertinent for SPI devices.
524 */ 525 static int 526 proberequestbackoff(struct cam_periph *periph, struct cam_ed *device) 527 { 528 struct ccb_trans_settings cts; 529 struct ccb_trans_settings_spi *spi; 530 531 memset(&cts, 0, sizeof (cts)); 532 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 533 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 534 cts.type = CTS_TYPE_CURRENT_SETTINGS; 535 xpt_action((union ccb *)&cts); 536 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 537 if (bootverbose) { 538 xpt_print(periph->path, 539 "failed to get current device settings\n"); 540 } 541 return (0); 542 } 543 if (cts.transport != XPORT_SPI) { 544 if (bootverbose) { 545 xpt_print(periph->path, "not SPI transport\n"); 546 } 547 return (0); 548 } 549 spi = &cts.xport_specific.spi; 550 551 /* 552 * We cannot renegotiate sync rate if we don't have one. 553 */ 554 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { 555 if (bootverbose) { 556 xpt_print(periph->path, "no sync rate known\n"); 557 } 558 return (0); 559 } 560 561 /* 562 * We'll assert that we don't have to touch PPR options- the 563 * SIM will see what we do with period and offset and adjust 564 * the PPR options as appropriate. 565 */ 566 567 /* 568 * A sync rate with unknown or zero offset is nonsensical. 569 * A sync period of zero means Async. 570 */ 571 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0 572 || spi->sync_offset == 0 || spi->sync_period == 0) { 573 if (bootverbose) { 574 xpt_print(periph->path, "no sync rate available\n"); 575 } 576 return (0); 577 } 578 579 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) { 580 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 581 ("hit async: giving up on DV\n")); 582 return (0); 583 } 584 585 586 /* 587 * Jump sync_period up by one, but stop at 5MHz and fall back to Async. 588 * We don't try to remember 'last' settings to see if the SIM actually 589 * gets into the speed we want to set. 
We check on the SIM telling 590 * us that a requested speed is bad, but otherwise don't try and 591 * check the speed due to the asynchronous and handshake nature 592 * of speed setting. 593 */ 594 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET; 595 for (;;) { 596 spi->sync_period++; 597 if (spi->sync_period >= 0xf) { 598 spi->sync_period = 0; 599 spi->sync_offset = 0; 600 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 601 ("setting to async for DV\n")); 602 /* 603 * Once we hit async, we don't want to try 604 * any more settings. 605 */ 606 device->flags |= CAM_DEV_DV_HIT_BOTTOM; 607 } else if (bootverbose) { 608 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 609 ("DV: period 0x%x\n", spi->sync_period)); 610 printf("setting period to 0x%x\n", spi->sync_period); 611 } 612 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 613 cts.type = CTS_TYPE_CURRENT_SETTINGS; 614 xpt_action((union ccb *)&cts); 615 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 616 break; 617 } 618 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 619 ("DV: failed to set period 0x%x\n", spi->sync_period)); 620 if (spi->sync_period == 0) { 621 return (0); 622 } 623 } 624 return (1); 625 } 626 #endif 627 static void 628 probedone(struct cam_periph *periph, union ccb *done_ccb) 629 { 630 struct ata_params *ident_buf; 631 probe_softc *softc; 632 struct cam_path *path; 633 u_int32_t priority; 634 int found = 0; 635 636 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 637 638 softc = (probe_softc *)periph->softc; 639 path = done_ccb->ccb_h.path; 640 priority = done_ccb->ccb_h.pinfo.priority; 641 ident_buf = &path->device->ident_data; 642 643 switch (softc->action) { 644 case PROBE_RESET: 645 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 646 int sign = (done_ccb->ataio.res.lba_high << 8) + 647 done_ccb->ataio.res.lba_mid; 648 xpt_print(path, "SIGNATURE: %04x\n", sign); 649 if (sign == 0x0000 && 650 done_ccb->ccb_h.target_id != 15) { 651 path->device->protocol = 
PROTO_ATA; 652 PROBE_SET_ACTION(softc, PROBE_IDENTIFY); 653 } else if (sign == 0x9669 && 654 done_ccb->ccb_h.target_id == 15) { 655 struct ccb_trans_settings cts; 656 657 /* Report SIM that PM is present. */ 658 bzero(&cts, sizeof(cts)); 659 xpt_setup_ccb(&cts.ccb_h, path, 1); 660 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 661 cts.type = CTS_TYPE_CURRENT_SETTINGS; 662 cts.xport_specific.sata.pm_present = 1; 663 cts.xport_specific.sata.valid = CTS_SATA_VALID_PM; 664 xpt_action((union ccb *)&cts); 665 path->device->protocol = PROTO_SATAPM; 666 PROBE_SET_ACTION(softc, PROBE_PM_PID); 667 } else if (sign == 0xeb14 && 668 done_ccb->ccb_h.target_id != 15) { 669 path->device->protocol = PROTO_SCSI; 670 PROBE_SET_ACTION(softc, PROBE_IDENTIFY); 671 } else { 672 if (done_ccb->ccb_h.target_id != 15) { 673 xpt_print(path, 674 "Unexpected signature 0x%04x\n", sign); 675 } 676 xpt_release_ccb(done_ccb); 677 break; 678 } 679 xpt_release_ccb(done_ccb); 680 xpt_schedule(periph, priority); 681 return; 682 } else if (cam_periph_error(done_ccb, 0, 0, 683 &softc->saved_ccb) == ERESTART) { 684 return; 685 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 686 /* Don't wedge the queue */ 687 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 688 /*run_queue*/TRUE); 689 } 690 goto device_fail; 691 case PROBE_IDENTIFY: 692 { 693 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 694 int16_t *ptr; 695 696 for (ptr = (int16_t *)ident_buf; 697 ptr < (int16_t *)ident_buf + sizeof(struct ata_params)/2; ptr++) { 698 *ptr = le16toh(*ptr); 699 } 700 if (strncmp(ident_buf->model, "FX", 2) && 701 strncmp(ident_buf->model, "NEC", 3) && 702 strncmp(ident_buf->model, "Pioneer", 7) && 703 strncmp(ident_buf->model, "SHARP", 5)) { 704 ata_bswap(ident_buf->model, sizeof(ident_buf->model)); 705 ata_bswap(ident_buf->revision, sizeof(ident_buf->revision)); 706 ata_bswap(ident_buf->serial, sizeof(ident_buf->serial)); 707 } 708 ata_btrim(ident_buf->model, sizeof(ident_buf->model)); 709 
ata_bpack(ident_buf->model, ident_buf->model, sizeof(ident_buf->model)); 710 ata_btrim(ident_buf->revision, sizeof(ident_buf->revision)); 711 ata_bpack(ident_buf->revision, ident_buf->revision, sizeof(ident_buf->revision)); 712 ata_btrim(ident_buf->serial, sizeof(ident_buf->serial)); 713 ata_bpack(ident_buf->serial, ident_buf->serial, sizeof(ident_buf->serial)); 714 715 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 716 /* Check that it is the same device. */ 717 MD5_CTX context; 718 u_int8_t digest[16]; 719 720 MD5Init(&context); 721 MD5Update(&context, 722 (unsigned char *)ident_buf->model, 723 sizeof(ident_buf->model)); 724 MD5Update(&context, 725 (unsigned char *)ident_buf->revision, 726 sizeof(ident_buf->revision)); 727 MD5Update(&context, 728 (unsigned char *)ident_buf->serial, 729 sizeof(ident_buf->serial)); 730 MD5Final(digest, &context); 731 if (bcmp(digest, softc->digest, sizeof(digest))) { 732 /* Device changed. */ 733 xpt_async(AC_LOST_DEVICE, path, NULL); 734 } 735 xpt_release_ccb(done_ccb); 736 break; 737 } 738 739 /* Clean up from previous instance of this device */ 740 if (path->device->serial_num != NULL) { 741 free(path->device->serial_num, M_CAMXPT); 742 path->device->serial_num = NULL; 743 path->device->serial_num_len = 0; 744 } 745 path->device->serial_num = 746 (u_int8_t *)malloc((sizeof(ident_buf->serial) + 1), 747 M_CAMXPT, M_NOWAIT); 748 if (path->device->serial_num != NULL) { 749 bcopy(ident_buf->serial, 750 path->device->serial_num, 751 sizeof(ident_buf->serial)); 752 path->device->serial_num[sizeof(ident_buf->serial)] 753 = '\0'; 754 path->device->serial_num_len = 755 strlen(path->device->serial_num); 756 } 757 758 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 759 760 scsi_find_quirk(path->device); 761 ata_device_transport(path); 762 763 PROBE_SET_ACTION(softc, PROBE_SETMODE); 764 xpt_release_ccb(done_ccb); 765 xpt_schedule(periph, priority); 766 return; 767 } else if (cam_periph_error(done_ccb, 0, 0, 768 
&softc->saved_ccb) == ERESTART) { 769 return; 770 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 771 /* Don't wedge the queue */ 772 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 773 /*run_queue*/TRUE); 774 } 775 device_fail: 776 /* 777 * If we get to this point, we got an error status back 778 * from the inquiry and the error status doesn't require 779 * automatically retrying the command. Therefore, the 780 * inquiry failed. If we had inquiry information before 781 * for this device, but this latest inquiry command failed, 782 * the device has probably gone away. If this device isn't 783 * already marked unconfigured, notify the peripheral 784 * drivers that this device is no more. 785 */ 786 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 787 /* Send the async notification. */ 788 xpt_async(AC_LOST_DEVICE, path, NULL); 789 790 xpt_release_ccb(done_ccb); 791 break; 792 } 793 case PROBE_SETMODE: 794 { 795 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 796 if (path->device->protocol == PROTO_ATA) { 797 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 798 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 799 xpt_action(done_ccb); 800 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, 801 done_ccb); 802 xpt_release_ccb(done_ccb); 803 break; 804 } else { 805 PROBE_SET_ACTION(softc, PROBE_INQUIRY); 806 xpt_release_ccb(done_ccb); 807 xpt_schedule(periph, priority); 808 return; 809 } 810 } else if (cam_periph_error(done_ccb, 0, 0, 811 &softc->saved_ccb) == ERESTART) { 812 return; 813 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 814 /* Don't wedge the queue */ 815 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 816 /*run_queue*/TRUE); 817 } 818 goto device_fail; 819 } 820 case PROBE_INQUIRY: 821 case PROBE_FULL_INQUIRY: 822 { 823 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 824 struct scsi_inquiry_data *inq_buf; 825 u_int8_t periph_qual; 826 827 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 828 
inq_buf = &path->device->inq_data; 829 830 periph_qual = SID_QUAL(inq_buf); 831 832 if (periph_qual == SID_QUAL_LU_CONNECTED) { 833 u_int8_t len; 834 835 /* 836 * We conservatively request only 837 * SHORT_INQUIRY_LEN bytes of inquiry 838 * information during our first try 839 * at sending an INQUIRY. If the device 840 * has more information to give, 841 * perform a second request specifying 842 * the amount of information the device 843 * is willing to give. 844 */ 845 len = inq_buf->additional_length 846 + offsetof(struct scsi_inquiry_data, 847 additional_length) + 1; 848 if (softc->action == PROBE_INQUIRY 849 && len > SHORT_INQUIRY_LENGTH) { 850 PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); 851 xpt_release_ccb(done_ccb); 852 xpt_schedule(periph, priority); 853 return; 854 } 855 856 scsi_find_quirk(path->device); 857 858 // scsi_devise_transport(path); 859 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 860 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 861 xpt_action(done_ccb); 862 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, 863 done_ccb); 864 xpt_release_ccb(done_ccb); 865 break; 866 } 867 } else if (cam_periph_error(done_ccb, 0, 0, 868 &softc->saved_ccb) == ERESTART) { 869 return; 870 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 871 /* Don't wedge the queue */ 872 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 873 /*run_queue*/TRUE); 874 } 875 goto device_fail; 876 } 877 case PROBE_PM_PID: 878 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 879 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0) 880 bzero(ident_buf, sizeof(*ident_buf)); 881 softc->pm_pid = (done_ccb->ataio.res.lba_high << 24) + 882 (done_ccb->ataio.res.lba_mid << 16) + 883 (done_ccb->ataio.res.lba_low << 8) + 884 done_ccb->ataio.res.sector_count; 885 printf("PM Product ID: %08x\n", softc->pm_pid); 886 snprintf(ident_buf->model, sizeof(ident_buf->model), 887 "Port Multiplier %08x", softc->pm_pid); 888 PROBE_SET_ACTION(softc, PROBE_PM_PRV); 889 
xpt_release_ccb(done_ccb); 890 xpt_schedule(periph, priority); 891 return; 892 } else if (cam_periph_error(done_ccb, 0, 0, 893 &softc->saved_ccb) == ERESTART) { 894 return; 895 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 896 /* Don't wedge the queue */ 897 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 898 /*run_queue*/TRUE); 899 } 900 goto device_fail; 901 case PROBE_PM_PRV: 902 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 903 softc->pm_prv = (done_ccb->ataio.res.lba_high << 24) + 904 (done_ccb->ataio.res.lba_mid << 16) + 905 (done_ccb->ataio.res.lba_low << 8) + 906 done_ccb->ataio.res.sector_count; 907 printf("PM Revision: %08x\n", softc->pm_prv); 908 snprintf(ident_buf->revision, sizeof(ident_buf->revision), 909 "%04x", softc->pm_prv); 910 PROBE_SET_ACTION(softc, PROBE_PM_PORTS); 911 xpt_release_ccb(done_ccb); 912 xpt_schedule(periph, priority); 913 return; 914 } else if (cam_periph_error(done_ccb, 0, 0, 915 &softc->saved_ccb) == ERESTART) { 916 return; 917 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 918 /* Don't wedge the queue */ 919 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 920 /*run_queue*/TRUE); 921 } 922 goto device_fail; 923 case PROBE_PM_PORTS: 924 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 925 softc->pm_ports = (done_ccb->ataio.res.lba_high << 24) + 926 (done_ccb->ataio.res.lba_mid << 16) + 927 (done_ccb->ataio.res.lba_low << 8) + 928 done_ccb->ataio.res.sector_count; 929 /* This PM declares 6 ports, while only 5 of them are real. 930 * Port 5 is enclosure management bridge port, which has implementation 931 * problems, causing probe faults. Hide it for now. */ 932 if (softc->pm_pid == 0x37261095 && softc->pm_ports == 6) 933 softc->pm_ports = 5; 934 /* This PM declares 7 ports, while only 5 of them are real. 935 * Port 5 is some fake "Config Disk" with 640 sectors size, 936 * port 6 is enclosure management bridge port. 
		 * Both fake ports have implementation problems, causing
		 * probe faults.  Hide them for now. */
		if (softc->pm_pid == 0x47261095 && softc->pm_ports == 7)
			softc->pm_ports = 5;
		/* XXX(review): debugging printf left in the probe path;
		 * should probably be CAM_DEBUG or removed. */
		printf("PM ports: %d\n", softc->pm_ports);
		/* Stash the port count in the synthetic identify data. */
		ident_buf->config = softc->pm_ports;
		path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
		softc->pm_step = 0;
		PROBE_SET_ACTION(softc, PROBE_PM_RESET);
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	} else if (cam_periph_error(done_ccb, 0, 0,
	    &softc->saved_ccb) == ERESTART) {
		return;
	} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		/* Don't wedge the queue */
		xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
				 /*run_queue*/TRUE);
	}
	goto device_fail;
	case PROBE_PM_RESET:
		/*
		 * One reset request is issued per PM port; pm_step walks
		 * the ports until all softc->pm_ports have been reset.
		 */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			softc->pm_step++;
			if (softc->pm_step < softc->pm_ports) {
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			} else {
				softc->pm_step = 0;
				/* Give the ports time to recover from reset. */
				DELAY(5000);
				/* XXX(review): leftover debug printf. */
				printf("PM reset done\n");
				PROBE_SET_ACTION(softc, PROBE_PM_CONNECT);
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
		} else if (cam_periph_error(done_ccb, 0, 0,
		    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		goto device_fail;
	case PROBE_PM_CONNECT:
		/* Same per-port iteration as PROBE_PM_RESET, one connect
		 * request per port; then move on to link-status checking. */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			softc->pm_step++;
			if (softc->pm_step < softc->pm_ports) {
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			} else {
				softc->pm_step = 0;
				softc->pm_try = 0;
				/* XXX(review): leftover debug printf. */
				printf("PM connect done\n");
				PROBE_SET_ACTION(softc, PROBE_PM_CHECK);
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
		} else if (cam_periph_error(done_ccb, 0, 0,
		    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		goto device_fail;
	case PROBE_PM_CHECK:
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			/*
			 * Reassemble the 32-bit port status from the ATA
			 * result registers (presumably the SATA SStatus
			 * register read through the PM — TODO confirm).
			 */
			int res = (done_ccb->ataio.res.lba_high << 24) +
			    (done_ccb->ataio.res.lba_mid << 16) +
			    (done_ccb->ataio.res.lba_low << 8) +
			    done_ccb->ataio.res.sector_count;
			/*
			 * DET==3 (device present, PHY up) and a non-zero
			 * SPD field: mark this port as having a device in
			 * the ident_buf->cylinders bitmask.
			 */
			if ((res & 0xf0f) == 0x103 && (res & 0x0f0) != 0) {
				printf("PM status: %d - %08x\n", softc->pm_step, res);
				ident_buf->cylinders |= (1 << softc->pm_step);
				softc->pm_step++;
			} else {
				/*
				 * Not ready yet: poll the same port up to
				 * 100 times (10ms apart) before declaring
				 * it empty.
				 */
				if (softc->pm_try < 100) {
					DELAY(10000);
					softc->pm_try++;
				} else {
					printf("PM status: %d - %08x\n", softc->pm_step, res);
					ident_buf->cylinders &= ~(1 << softc->pm_step);
					softc->pm_step++;
				}
			}
			if (softc->pm_step < softc->pm_ports) {
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			} else {
				softc->pm_step = 0;
				PROBE_SET_ACTION(softc, PROBE_PM_CLEAR);
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
		} else if (cam_periph_error(done_ccb, 0, 0,
		    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		goto device_fail;
	case PROBE_PM_CLEAR:
		/* Per-port error clearing; when all ports are done, report
		 * the discovered-ports bitmask back through the request CCB. */
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			softc->pm_step++;
			if (softc->pm_step < softc->pm_ports) {
				xpt_release_ccb(done_ccb);
				xpt_schedule(periph, priority);
				return;
			}
			/*
			 * Bit 15 flags "PM present"; low bits are the ports
			 * on which devices were detected (see PROBE_PM_CHECK).
			 */
			found = ident_buf->cylinders | 0x8000;
			if (path->device->flags & CAM_DEV_UNCONFIGURED) {
				/* First sighting: announce the new device. */
				path->device->flags &= ~CAM_DEV_UNCONFIGURED;
				done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action(done_ccb);
				xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
				    done_ccb);
				xpt_release_ccb(done_ccb);
			}
			break;
		} else if (cam_periph_error(done_ccb, 0, 0,
		    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
					 /*run_queue*/TRUE);
		}
		goto device_fail;
	case PROBE_INVALID:
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO,
		    ("probedone: invalid action state\n"));
	default:
		break;
	}
	/*
	 * Probe finished (or failed): complete the oldest queued request
	 * CCB, passing the probe result in ppriv_field1.  If no further
	 * requests are queued, tear down this probe periph; otherwise
	 * start the next probe.
	 */
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	done_ccb->ccb_h.ppriv_field1 = found;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release_locked(periph);
	} else {
		probeschedule(periph);
	}
}

/*
 * Periph destructor: release the probe softc allocated at register time.
 */
static void
probecleanup(struct cam_periph *periph)
{
	free(periph->softc, M_CAMXPT);
}

/*
 * Match the device's inquiry data against the static quirk table and
 * attach the winning entry (the table's last entry is a wildcard, so a
 * miss is a programming error) along with its tag limits.
 */
static void
scsi_find_quirk(struct cam_ed *device)
{
	struct scsi_quirk_entry *quirk;
	caddr_t	match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)scsi_quirk_table,
			       sizeof(scsi_quirk_table) /
			       sizeof(*scsi_quirk_table),
			       sizeof(*scsi_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	quirk = (struct scsi_quirk_entry *)match;
	device->quirk = quirk;
	device->mintags = quirk->mintags;
	device->maxtags = quirk->maxtags;
}

/*
 * Per-scan state carried through the bus-scan callbacks via ppriv_ptr0.
 */
typedef struct {
	union	ccb *request_ccb;	/* originating XPT_SCAN_BUS CCB */
	struct	ccb_pathinq *cpi;	/* bus characteristics (owns a CCB) */
	int	counter;		/* target currently being probed */
	int	found;			/* bitmask of targets with devices */
}
ata_scan_bus_info;

/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, ata_scan_bus is used as the
 * callback on completion function.
 */
static void
ata_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	struct	cam_path *path;
	ata_scan_bus_info *scan_info;
	union	ccb *work_ccb;
	cam_status status;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb_nowait();
		if (work_ccb == NULL) {
			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(request_ccb);
			return;
		}
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (ata_scan_bus_info *)
		    malloc(sizeof(ata_scan_bus_info), M_CAMXPT, M_NOWAIT);
		if (scan_info == NULL) {
			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(request_ccb);
			return;
		}
		scan_info->request_ccb = request_ccb;
		/* The PATH_INQ work_ccb stays alive as scan_info->cpi. */
		scan_info->cpi = &work_ccb->cpi;
		/*
		 * Initial guess: target 0 plus the PM slot (bit 15) may
		 * hold devices; the probe results refine this mask.
		 */
		scan_info->found = 0x8001;
		scan_info->counter = 0;
		/* If PM supported, probe it first. */
		if (scan_info->cpi->hba_inquiry & PI_SATAPM)
			scan_info->counter = 15;

		work_ccb = xpt_alloc_ccb_nowait();
		if (work_ccb == NULL) {
			free(scan_info, M_CAMXPT);
			request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			xpt_done(request_ccb);
			break;
		}
		goto scan_next;
	case XPT_SCAN_LUN:
		/* A per-target probe just completed. */
		work_ccb = request_ccb;
		/* Reuse the same CCB to query if a device was really found */
		scan_info = (ata_scan_bus_info *)work_ccb->ccb_h.ppriv_ptr0;
		/* Free the current request path- we're done with it. */
		xpt_free_path(work_ccb->ccb_h.path);
		/* If there is PM... */
		if (scan_info->counter == 15) {
			if (work_ccb->ccb_h.ppriv_field1 != 0) {
				/* Save PM probe result. */
				scan_info->found = work_ccb->ccb_h.ppriv_field1;
			} else {
				struct ccb_trans_settings cts;

				/* Report SIM that PM is absent. */
				bzero(&cts, sizeof(cts));
				xpt_setup_ccb(&cts.ccb_h,
				    scan_info->request_ccb->ccb_h.path, 1);
				cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
				cts.type = CTS_TYPE_CURRENT_SETTINGS;
				cts.xport_specific.sata.pm_present = 0;
				cts.xport_specific.sata.valid = CTS_SATA_VALID_PM;
				xpt_action((union ccb *)&cts);
			}
		}
take_next:
		/* Take next device. Wrap from 15 (PM) to 0. */
		scan_info->counter = (scan_info->counter + 1 ) & 0x0f;
		if (scan_info->counter >= scan_info->cpi->max_target+1) {
			/* All targets visited: finish the bus scan. */
			xpt_free_ccb(work_ccb);
			xpt_free_ccb((union ccb *)scan_info->cpi);
			request_ccb = scan_info->request_ccb;
			free(scan_info, M_CAMXPT);
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
			break;
		}
scan_next:
		status = xpt_create_path(&path, xpt_periph,
		    scan_info->request_ccb->ccb_h.path_id,
		    scan_info->counter, 0);
		if (status != CAM_REQ_CMP) {
			printf("xpt_scan_bus: xpt_create_path failed"
			    " with status %#x, bus scan halted\n",
			    status);
			xpt_free_ccb(work_ccb);
			xpt_free_ccb((union ccb *)scan_info->cpi);
			request_ccb = scan_info->request_ccb;
			free(scan_info, M_CAMXPT);
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
			break;
		}
		if ((scan_info->found & (1 << scan_info->counter)) == 0) {
			/* Probe said nothing is there: drop the device. */
			xpt_async(AC_LOST_DEVICE, path, NULL);
			xpt_free_path(path);
			goto take_next;
		}
		/* Kick off a LUN scan on this target; we re-enter above
		 * as XPT_SCAN_LUN when it completes. */
		xpt_setup_ccb(&work_ccb->ccb_h, path,
		    scan_info->request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		work_ccb->ccb_h.cbfcnp = ata_scan_bus;
		work_ccb->ccb_h.ppriv_ptr0 = scan_info;
		work_ccb->crcn.flags = scan_info->request_ccb->crcn.flags;
		xpt_action(work_ccb);
		break;
	default:
		break;
	}
}

/*
 * Scan (probe) a single device.  If request_ccb is NULL, a CCB and a
 * private path are allocated here and completed via xptscandone;
 * otherwise the caller's CCB is queued on the existing probe periph or
 * used to create a new one.
 */
static void
ata_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		/* Internally generated rescan: build our own CCB + path. */
		request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
		if (request_ccb == NULL) {
			xpt_print(path, "xpt_scan_lun: can't allocate CCB, "
			    "can't continue\n");
			return;
		}
		new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
		if (new_path == NULL) {
			xpt_print(path, "xpt_scan_lun: can't allocate path, "
			    "can't continue\n");
			free(request_ccb, M_CAMXPT);
			return;
		}
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: can't compile path, "
			    "can't continue\n");
			free(request_ccb, M_CAMXPT);
			free(new_path, M_CAMXPT);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	if ((old_periph = cam_periph_find(path, "aprobe")) != NULL) {
		/* A probe is already running on this path: queue behind it. */
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "aprobe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
}

/*
 * Completion handler for the self-allocated scan CCBs built in
 * ata_scan_lun: release and free the path, then free the CCB.
 */
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	free(done_ccb->ccb_h.path, M_CAMXPT);
	free(done_ccb,
	     M_CAMXPT);
}

/*
 * Allocate and initialize a device node for (bus, target, lun), seed it
 * with the wildcard quirk entry, and insertion-sort it into the target's
 * device list (sorted by LUN id).  Returns NULL on allocation failure.
 */
static struct cam_ed *
ata_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_path path;
	struct scsi_quirk_entry *quirk;
	struct cam_ed *device;
	struct cam_ed *cur_device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data and can determine a better quirk to use.
	 */
	quirk = &scsi_quirk_table[scsi_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = quirk->mintags;
	device->maxtags = quirk->maxtags;
	bzero(&device->inq_data, sizeof(device->inq_data));
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->serial_num = NULL;
	device->serial_num_len = 0;

	/*
	 * XXX should be limited by number of CCBs this bus can
	 * do.
	 */
	bus->sim->max_ccbs += device->ccbq.devq_openings;
	/* Insertion sort into our target's device list */
	cur_device = TAILQ_FIRST(&target->ed_entries);
	while (cur_device != NULL && cur_device->lun_id < lun_id)
		cur_device = TAILQ_NEXT(cur_device, links);
	if (cur_device != NULL) {
		TAILQ_INSERT_BEFORE(cur_device, device, links);
	} else {
		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
	}
	target->generation++;
	if (lun_id != CAM_LUN_WILDCARD) {
		/* Pull transport info from the SIM for real devices. */
		xpt_compile_path(&path,
				 NULL,
				 bus->path_id,
				 target->target_id,
				 lun_id);
		ata_device_transport(&path);
		xpt_release_path(&path);
	}

	return (device);
}

/*
 * Record the SIM's transport type/version on the device.  The protocol
 * and SPI-era negotiation logic below is carried over from the SCSI XPT
 * but disabled (commented / #if 0) for the ATA transport.
 */
static void
ata_device_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
//	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	inq_buf = NULL;
//	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
//		inq_buf = &path->device->inq_data;
//	path->device->protocol = cpi.protocol;
//	path->device->protocol_version =
//	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;
#if 0
	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed* otherdev;

		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
#endif
}

/*
 * ATA transport entry point for XPT actions: handles transfer-settings
 * and scan requests here, forwards GET_TRAN_SETTINGS straight to the
 * SIM, and falls back to the generic XPT for everything else.
 */
static void
ata_action(union ccb *start_ccb)
{

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SET_TRAN_SETTINGS:
	{
		scsi_set_transfer_settings(&start_ccb->cts,
					   start_ccb->ccb_h.path->device,
					   /*async_update*/FALSE);
		break;
	}
	case XPT_SCAN_BUS:
		ata_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
		break;
	case XPT_SCAN_LUN:
		ata_scan_lun(start_ccb->ccb_h.path->periph,
			     start_ccb->ccb_h.path, start_ccb->crcn.flags,
			     start_ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct cam_sim *sim;

		/* Hand the request straight to the SIM. */
		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Sanity-check and apply transfer settings for a device, filling in
 * unspecified protocol/transport fields from the device's known values
 * and clamping versions the device cannot support.  When async_update
 * is FALSE the (possibly adjusted) settings are finally passed to the
 * SIM.  (Inherited from the SCSI XPT; the SPI-specific paths are dead
 * for ATA transports but retained.)
 */
static void
scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			   int async_update)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cur_cts;
	struct	ccb_trans_settings_scsi *scsi;
	struct	ccb_trans_settings_scsi *cur_scsi;
	struct	cam_sim *sim;
	struct	scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	/* Fill unspecified protocol fields from the device. */
	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version =
		    device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	/* A mismatched protocol is a caller bug: warn and override. */
	if (cts->protocol != device->protocol) {
		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
			  cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	/* Never advertise a newer protocol version than the device has. */
	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Protocol "
			    "Version from %d to %d?\n", cts->protocol_version,
			    device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	/* Same defaulting/clamping for the transport fields. */
	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
		    cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Transport "
			    "Version from %d to %d?\n", cts->transport_version,
			    device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* SCSI specific sanity checking */
	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.type = cts->type;
		xpt_action((union ccb *)&cur_cts);
		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			return;
		}
		cur_scsi = &cur_cts.proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			/* Caller didn't specify TQ: keep the current state. */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	/* SPI specific sanity checking */
	/* NOTE: cur_cts is only initialized above when async_update is
	 * FALSE, which this branch also requires. */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		/* Clamp the bus width to what both sides can do. */
		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		/* Strip PPR options the HBA/device pair cannot support. */
		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->valid & CTS_SPI_VALID_DISC)
		 && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			scsi_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				struct ccb_relsim crs;

				/* Disable tags now; drain then release the
				 * queue so tagged and untagged commands
				 * never overlap. */
				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;

				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
					      /*priority*/1);
				crs.ccb_h.func_code = XPT_REL_SIMQ;
				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
				crs.openings
				    = crs.release_timeout
				    = crs.qfrozen_cnt
				    = 0;
				xpt_action((union ccb *)&crs);
			}
		}
	}
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
}

static void
scsi_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
1810 */ 1811 dev = path->device; 1812 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 1813 || ((dev->inq_flags & SID_CmdQue) != 0 1814 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { 1815 struct ccb_trans_settings cts; 1816 1817 xpt_setup_ccb(&cts.ccb_h, path, 1); 1818 cts.protocol = PROTO_SCSI; 1819 cts.protocol_version = PROTO_VERSION_UNSPECIFIED; 1820 cts.transport = XPORT_UNSPECIFIED; 1821 cts.transport_version = XPORT_VERSION_UNSPECIFIED; 1822 cts.proto_specific.scsi.flags = 0; 1823 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; 1824 scsi_set_transfer_settings(&cts, path->device, 1825 /*async_update*/TRUE); 1826 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; 1827 scsi_set_transfer_settings(&cts, path->device, 1828 /*async_update*/TRUE); 1829 } 1830 } 1831 1832 /* 1833 * Handle any per-device event notifications that require action by the XPT. 1834 */ 1835 static void 1836 ata_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, 1837 struct cam_ed *device, void *async_arg) 1838 { 1839 cam_status status; 1840 struct cam_path newpath; 1841 1842 /* 1843 * We only need to handle events for real devices. 1844 */ 1845 if (target->target_id == CAM_TARGET_WILDCARD 1846 || device->lun_id == CAM_LUN_WILDCARD) 1847 return; 1848 1849 /* 1850 * We need our own path with wildcards expanded to 1851 * handle certain types of events. 1852 */ 1853 if ((async_code == AC_SENT_BDR) 1854 || (async_code == AC_BUS_RESET) 1855 || (async_code == AC_INQ_CHANGED)) 1856 status = xpt_compile_path(&newpath, NULL, 1857 bus->path_id, 1858 target->target_id, 1859 device->lun_id); 1860 else 1861 status = CAM_REQ_CMP_ERR; 1862 1863 if (status == CAM_REQ_CMP) { 1864 1865 /* 1866 * Allow transfer negotiation to occur in a 1867 * tag free environment. 
1868 */ 1869 if (async_code == AC_SENT_BDR 1870 || async_code == AC_BUS_RESET) 1871 scsi_toggle_tags(&newpath); 1872 1873 if (async_code == AC_INQ_CHANGED) { 1874 /* 1875 * We've sent a start unit command, or 1876 * something similar to a device that 1877 * may have caused its inquiry data to 1878 * change. So we re-scan the device to 1879 * refresh the inquiry data for it. 1880 */ 1881 ata_scan_lun(newpath.periph, &newpath, 1882 CAM_EXPECT_INQ_CHANGE, NULL); 1883 } 1884 xpt_release_path(&newpath); 1885 } else if (async_code == AC_LOST_DEVICE) { 1886 device->flags |= CAM_DEV_UNCONFIGURED; 1887 } else if (async_code == AC_TRANSFER_NEG) { 1888 struct ccb_trans_settings *settings; 1889 1890 settings = (struct ccb_trans_settings *)async_arg; 1891 scsi_set_transfer_settings(settings, device, 1892 /*async_update*/TRUE); 1893 } 1894 } 1895 1896