1 /*- 2 * Implementation of the SCSI Transport 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include <machine/stdarg.h>	/* for xpt_print below */
#include "opt_cam.h"

/*
 * Per-device quirk description.  inq_pat is matched against the device's
 * INQUIRY data; the remaining fields override default probe/tag behavior
 * for devices that match.
 */
struct scsi_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01	/* only probe LUN 0 */
#define	CAM_QUIRK_NOSERIAL	0x02	/* do not request a serial number */
#define	CAM_QUIRK_HILUNS	0x04	/* always search LUNs above 7 */
#define	CAM_QUIRK_NOHILUNS	0x08	/* never search LUNs above 7 */
#define	CAM_QUIRK_NORPTLUNS	0x10	/* skip REPORT LUNS during probe */
	u_int mintags;			/* minimum simultaneous tagged cmds */
	u_int maxtags;			/* maximum simultaneous tagged cmds */
};
#define	SCSI_QUIRK(dev)	((struct scsi_quirk_entry *)((dev)->quirk))

/* Tunable/sysctl: allow LUN search above 7 for SCSI-3+ devices. */
static int cam_srch_hi = 0;
TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
    sysctl_cam_search_luns, "I",
    "allow search above LUN 7 for SCSI3 and greater devices");

#define	CAM_SCSI2_MAXLUN	8
/*
 * A REPORT LUNS entry can be decoded to a simple (flat) lun only for the
 * "peripheral" and "flat" address methods; other methods need full
 * hierarchical handling.
 */
#define	CAM_CAN_GET_SIMPLE_LUN(x, i)				\
	((((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==	\
	RPL_LUNDATA_ATYP_PERIPH) ||				\
	(((x)->luns[i].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==	\
	RPL_LUNDATA_ATYP_FLAT))
/* Extract the flat lun number from REPORT LUNS entry i into lval. */
#define	CAM_GET_SIMPLE_LUN(lp, i, lval)					\
	if (((lp)->luns[(i)].lundata[0] & RPL_LUNDATA_ATYP_MASK) ==	\
	    RPL_LUNDATA_ATYP_PERIPH) {					\
		(lval) = (lp)->luns[(i)].lundata[1];			\
	} else {							\
		(lval) = (lp)->luns[(i)].lundata[0];			\
		(lval) &= RPL_LUNDATA_FLAT_LUN_MASK;			\
		(lval) <<= 8;						\
		(lval) |= (lp)->luns[(i)].lundata[1];			\
	}
/*
 * If we're not quirked to search <= the first 8 luns
 * and we are either quirked to search above lun 8,
 * or we're > SCSI-2 and we've enabled hilun searching,
 * or we're > SCSI-2 and the last lun was a success,
 * we can look for luns above lun 8.
 */
#define	CAN_SRCH_HI_SPARSE(dv)					\
  (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))

#define	CAN_SRCH_HI_DENSE(dv)					\
  (((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_NOHILUNS) == 0)		\
  && ((SCSI_QUIRK(dv)->quirks & CAM_QUIRK_HILUNS)		\
  || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))

static periph_init_t probe_periph_init;

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(probe, probe_driver);

/* States of the probe state machine, in rough order of execution. */
typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,	/* this counts as DV0 for Basic Domain Validation */
	PROBE_FULL_INQUIRY,
	PROBE_REPORT_LUNS,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM_0,
	PROBE_SERIAL_NUM_1,
	PROBE_TUR_FOR_NEGOTIATION,
	PROBE_INQUIRY_BASIC_DV1,
	PROBE_INQUIRY_BASIC_DV2,
	PROBE_DV_EXIT,
	PROBE_INVALID
} probe_action;

/* Must stay in one-to-one correspondence with probe_action above. */
static char *probe_action_text[] = {
	"PROBE_TUR",
	"PROBE_INQUIRY",
	"PROBE_FULL_INQUIRY",
	"PROBE_REPORT_LUNS",
	"PROBE_MODE_SENSE",
	"PROBE_SERIAL_NUM_0",
	"PROBE_SERIAL_NUM_1",
	"PROBE_TUR_FOR_NEGOTIATION",
	"PROBE_INQUIRY_BASIC_DV1",
	"PROBE_INQUIRY_BASIC_DV2",
	"PROBE_DV_EXIT",
	"PROBE_INVALID"
};

/* Transition the probe state machine, logging the old and new states. */
#define PROBE_SET_ACTION(softc, newaction)	\
do {									\
	char **text;							\
	text = probe_action_text;					\
	CAM_DEBUG((softc)->periph->path, CAM_DEBUG_INFO,		\
	    ("Probe %s to %s\n", text[(softc)->action],			\
	    text[(newaction)]));					\
	(softc)->action = (newaction);					\
} while(0)

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,	/* MD5 of inquiry data is valid */
	PROBE_SERIAL_CKSUM	= 0x02,	/* serial number folded into MD5 */
	PROBE_NO_ANNOUNCE	= 0x04	/* suppress periph announcement */
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;	/* queued probe request CCBs */
	probe_action	action;		/* current state machine state */
	union ccb	saved_ccb;	/* passed to cam_periph_error() */
	probe_flags	flags;
	MD5_CTX		context;	/* running inquiry/serial checksum */
	u_int8_t	digest[16];	/* finished MD5 digest */
	struct cam_periph *periph;
} probe_softc;

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

/*
 * NOTE(review): entries appear to be matched first-match-wins (the
 * catch-all default entry is last) -- confirm against scsi_find_quirk().
 */
static struct scsi_quirk_entry scsi_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entries disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* This does not support other than LUN 0 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R   CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * don't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * The Hitachi CJ series with J8A8 firmware apparantly has
		 * problems with tagged commands.
		 * PR: 23536
		 * Reported by: amagai@nue.org
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These are the large storage arrays.
		 * Submitted by:  William Carrel <william.carrel@infospace.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
		CAM_QUIRK_HILUNS, 2, 1024
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would repond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would repond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		  /*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/* Veritas Storage Appliance */
		{ T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
		  CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* EasyRAID E5A aka. areca ARC-6010 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
		  CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int scsi_quirk_table_size =
	sizeof(scsi_quirk_table) / sizeof(*scsi_quirk_table);

static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static int       proberequestbackoff(struct cam_periph *periph,
				     struct cam_ed *device);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static int       probe_strange_rpl_data(struct scsi_report_luns_data *rp,
					uint32_t maxlun);
static void	 probe_purge_old(struct cam_path *path,
				 struct scsi_report_luns_data *new);
static void	 probecleanup(struct cam_periph *periph);
static void	 scsi_find_quirk(struct cam_ed *device);
static void	 scsi_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 scsi_scan_lun(struct cam_periph *periph,
			       struct cam_path *path, cam_flags flags,
			       union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static struct cam_ed *
		 scsi_alloc_device(struct cam_eb *bus, struct cam_et *target,
				   lun_id_t lun_id);
static void	 scsi_devise_transport(struct cam_path *path);
static void	 scsi_set_transfer_settings(struct ccb_trans_settings *cts,
					    struct cam_ed *device,
					    int async_update);
static void	 scsi_toggle_tags(struct cam_path
*path);
static void	 scsi_dev_async(u_int32_t async_code,
				struct cam_eb *bus,
				struct cam_et *target,
				struct cam_ed *device,
				void *async_arg);
static void	 scsi_action(union ccb *start_ccb);
static void	 scsi_announce_periph(struct cam_periph *periph);

/* SCSI transport ops registered with the common CAM transport layer. */
static struct xpt_xport scsi_xport = {
	.alloc_device = scsi_alloc_device,
	.action = scsi_action,
	.async = scsi_dev_async,
	.announce = scsi_announce_periph,
};

/* Return the SCSI transport vector for registration by the XPT core. */
struct xpt_xport *
scsi_get_xport(void)
{
	return (&scsi_xport);
}

/* Nothing to do at periph driver initialization time. */
static void
probe_periph_init()
{
}

/*
 * Periph registration callback for the "probe" driver.  Allocates the
 * probe softc, queues the requesting CCB, freezes the device queue so
 * nothing else runs until the probe finishes, and kicks off the first
 * probe step via probeschedule().
 */
static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	union ccb *request_ccb;	/* CCB representing the probe request */
	cam_status status;
	probe_softc *softc;

	request_ccb = (union ccb *)arg;
	if (periph == NULL) {
		printf("proberegister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (request_ccb == NULL) {
		printf("proberegister: no probe CCB, "
		       "can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (probe_softc *)malloc(sizeof(*softc), M_CAMXPT, M_NOWAIT);

	if (softc == NULL) {
		printf("proberegister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
			  periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	softc->periph = periph;
	softc->action = PROBE_INVALID;
	status = cam_periph_acquire(periph);
	if (status != CAM_REQ_CMP) {
		return (status);
	}


	/*
	 * Ensure we've waited at least a bus settle
	 * delay before attempting to probe the device.
	 * For HBAs that don't do bus resets, this won't make a difference.
	 */
	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
				      scsi_delay);
	/*
	 * Ensure nobody slip in until probe finish.
	 */
	cam_freeze_devq_arg(periph->path,
	    RELSIM_RELEASE_RUNLEVEL, CAM_RL_XPT + 1);
	probeschedule(periph);
	return(CAM_REQ_CMP);
}

/*
 * Choose the initial probe state for the device at the head of the
 * request queue and schedule the periph to issue it.
 */
static void
probeschedule(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away start your scan from
	 * lun 0.  This will insure that any bogus transfer settings are
	 * invalidated.
	 *
	 * If we haven't seen the device before and the controller supports
	 * some kind of transfer negotiation, negotiate with the first
	 * sent command if no bus reset was performed at startup.  This
	 * ensures that the device is not confused by transfer negotiation
	 * settings left over by loader or BIOS action.
679 */ 680 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 681 && (ccb->ccb_h.target_lun == 0)) { 682 PROBE_SET_ACTION(softc, PROBE_TUR); 683 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 684 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { 685 proberequestdefaultnegotiation(periph); 686 PROBE_SET_ACTION(softc, PROBE_INQUIRY); 687 } else { 688 PROBE_SET_ACTION(softc, PROBE_INQUIRY); 689 } 690 691 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) 692 softc->flags |= PROBE_NO_ANNOUNCE; 693 else 694 softc->flags &= ~PROBE_NO_ANNOUNCE; 695 696 xpt_schedule(periph, CAM_PRIORITY_XPT); 697 } 698 699 static void 700 probestart(struct cam_periph *periph, union ccb *start_ccb) 701 { 702 /* Probe the device that our peripheral driver points to */ 703 struct ccb_scsiio *csio; 704 probe_softc *softc; 705 706 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); 707 708 softc = (probe_softc *)periph->softc; 709 csio = &start_ccb->csio; 710 again: 711 712 switch (softc->action) { 713 case PROBE_TUR: 714 case PROBE_TUR_FOR_NEGOTIATION: 715 case PROBE_DV_EXIT: 716 { 717 scsi_test_unit_ready(csio, 718 /*retries*/10, 719 probedone, 720 MSG_SIMPLE_Q_TAG, 721 SSD_FULL_SIZE, 722 /*timeout*/60000); 723 break; 724 } 725 case PROBE_INQUIRY: 726 case PROBE_FULL_INQUIRY: 727 case PROBE_INQUIRY_BASIC_DV1: 728 case PROBE_INQUIRY_BASIC_DV2: 729 { 730 u_int inquiry_len; 731 struct scsi_inquiry_data *inq_buf; 732 733 inq_buf = &periph->path->device->inq_data; 734 735 /* 736 * If the device is currently configured, we calculate an 737 * MD5 checksum of the inquiry data, and if the serial number 738 * length is greater than 0, add the serial number data 739 * into the checksum as well. Once the inquiry and the 740 * serial number check finish, we attempt to figure out 741 * whether we still have the same device. 
742 */ 743 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 744 745 MD5Init(&softc->context); 746 MD5Update(&softc->context, (unsigned char *)inq_buf, 747 sizeof(struct scsi_inquiry_data)); 748 softc->flags |= PROBE_INQUIRY_CKSUM; 749 if (periph->path->device->serial_num_len > 0) { 750 MD5Update(&softc->context, 751 periph->path->device->serial_num, 752 periph->path->device->serial_num_len); 753 softc->flags |= PROBE_SERIAL_CKSUM; 754 } 755 MD5Final(softc->digest, &softc->context); 756 } 757 758 if (softc->action == PROBE_INQUIRY) 759 inquiry_len = SHORT_INQUIRY_LENGTH; 760 else 761 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf); 762 763 /* 764 * Some parallel SCSI devices fail to send an 765 * ignore wide residue message when dealing with 766 * odd length inquiry requests. Round up to be 767 * safe. 768 */ 769 inquiry_len = roundup2(inquiry_len, 2); 770 771 if (softc->action == PROBE_INQUIRY_BASIC_DV1 772 || softc->action == PROBE_INQUIRY_BASIC_DV2) { 773 inq_buf = malloc(inquiry_len, M_CAMXPT, M_NOWAIT); 774 } 775 if (inq_buf == NULL) { 776 xpt_print(periph->path, "malloc failure- skipping Basic" 777 "Domain Validation\n"); 778 PROBE_SET_ACTION(softc, PROBE_DV_EXIT); 779 scsi_test_unit_ready(csio, 780 /*retries*/4, 781 probedone, 782 MSG_SIMPLE_Q_TAG, 783 SSD_FULL_SIZE, 784 /*timeout*/60000); 785 break; 786 } 787 scsi_inquiry(csio, 788 /*retries*/4, 789 probedone, 790 MSG_SIMPLE_Q_TAG, 791 (u_int8_t *)inq_buf, 792 inquiry_len, 793 /*evpd*/FALSE, 794 /*page_code*/0, 795 SSD_MIN_SIZE, 796 /*timeout*/60 * 1000); 797 break; 798 } 799 case PROBE_REPORT_LUNS: 800 { 801 void *rp; 802 803 rp = malloc(periph->path->target->rpl_size, 804 M_CAMXPT, M_NOWAIT | M_ZERO); 805 if (rp == NULL) { 806 struct scsi_inquiry_data *inq_buf; 807 inq_buf = &periph->path->device->inq_data; 808 xpt_print(periph->path, 809 "Unable to alloc report luns storage\n"); 810 if (INQ_DATA_TQ_ENABLED(inq_buf)) 811 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); 812 else 813 
PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0); 814 goto again; 815 } 816 scsi_report_luns(csio, 5, probedone, MSG_SIMPLE_Q_TAG, 817 RPL_REPORT_DEFAULT, rp, periph->path->target->rpl_size, 818 SSD_FULL_SIZE, 60000); break; 819 break; 820 } 821 case PROBE_MODE_SENSE: 822 { 823 void *mode_buf; 824 int mode_buf_len; 825 826 mode_buf_len = sizeof(struct scsi_mode_header_6) 827 + sizeof(struct scsi_mode_blk_desc) 828 + sizeof(struct scsi_control_page); 829 mode_buf = malloc(mode_buf_len, M_CAMXPT, M_NOWAIT); 830 if (mode_buf != NULL) { 831 scsi_mode_sense(csio, 832 /*retries*/4, 833 probedone, 834 MSG_SIMPLE_Q_TAG, 835 /*dbd*/FALSE, 836 SMS_PAGE_CTRL_CURRENT, 837 SMS_CONTROL_MODE_PAGE, 838 mode_buf, 839 mode_buf_len, 840 SSD_FULL_SIZE, 841 /*timeout*/60000); 842 break; 843 } 844 xpt_print(periph->path, "Unable to mode sense control page - " 845 "malloc failure\n"); 846 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0); 847 } 848 /* FALLTHROUGH */ 849 case PROBE_SERIAL_NUM_0: 850 { 851 struct scsi_vpd_supported_page_list *vpd_list = NULL; 852 struct cam_ed *device; 853 854 device = periph->path->device; 855 if ((SCSI_QUIRK(device)->quirks & CAM_QUIRK_NOSERIAL) == 0) { 856 vpd_list = malloc(sizeof(*vpd_list), M_CAMXPT, 857 M_NOWAIT | M_ZERO); 858 } 859 860 if (vpd_list != NULL) { 861 scsi_inquiry(csio, 862 /*retries*/4, 863 probedone, 864 MSG_SIMPLE_Q_TAG, 865 (u_int8_t *)vpd_list, 866 sizeof(*vpd_list), 867 /*evpd*/TRUE, 868 SVPD_SUPPORTED_PAGE_LIST, 869 SSD_MIN_SIZE, 870 /*timeout*/60 * 1000); 871 break; 872 } 873 /* 874 * We'll have to do without, let our probedone 875 * routine finish up for us. 
876 */ 877 start_ccb->csio.data_ptr = NULL; 878 probedone(periph, start_ccb); 879 return; 880 } 881 case PROBE_SERIAL_NUM_1: 882 { 883 struct scsi_vpd_unit_serial_number *serial_buf; 884 struct cam_ed* device; 885 886 serial_buf = NULL; 887 device = periph->path->device; 888 if (device->serial_num != NULL) { 889 free(device->serial_num, M_CAMXPT); 890 device->serial_num = NULL; 891 device->serial_num_len = 0; 892 } 893 894 serial_buf = (struct scsi_vpd_unit_serial_number *) 895 malloc(sizeof(*serial_buf), M_CAMXPT, M_NOWAIT|M_ZERO); 896 897 if (serial_buf != NULL) { 898 scsi_inquiry(csio, 899 /*retries*/4, 900 probedone, 901 MSG_SIMPLE_Q_TAG, 902 (u_int8_t *)serial_buf, 903 sizeof(*serial_buf), 904 /*evpd*/TRUE, 905 SVPD_UNIT_SERIAL_NUMBER, 906 SSD_MIN_SIZE, 907 /*timeout*/60 * 1000); 908 break; 909 } 910 /* 911 * We'll have to do without, let our probedone 912 * routine finish up for us. 913 */ 914 start_ccb->csio.data_ptr = NULL; 915 probedone(periph, start_ccb); 916 return; 917 } 918 case PROBE_INVALID: 919 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_INFO, 920 ("probestart: invalid action state\n")); 921 default: 922 break; 923 } 924 xpt_action(start_ccb); 925 } 926 927 static void 928 proberequestdefaultnegotiation(struct cam_periph *periph) 929 { 930 struct ccb_trans_settings cts; 931 932 xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE); 933 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 934 cts.type = CTS_TYPE_USER_SETTINGS; 935 xpt_action((union ccb *)&cts); 936 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 937 return; 938 } 939 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 940 cts.type = CTS_TYPE_CURRENT_SETTINGS; 941 xpt_action((union ccb *)&cts); 942 } 943 944 /* 945 * Backoff Negotiation Code- only pertinent for SPI devices. 
 */
/*
 * Step the SPI sync period one notch slower for domain validation,
 * falling back to async at the bottom.  Returns 1 if a new (slower)
 * rate was successfully requested, 0 if backoff is not possible
 * (non-SPI transport, no sync rate, already async, or set failed).
 */
static int
proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
{
	struct ccb_trans_settings cts;
	struct ccb_trans_settings_spi *spi;

	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb *)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (bootverbose) {
			xpt_print(periph->path,
			    "failed to get current device settings\n");
		}
		return (0);
	}
	if (cts.transport != XPORT_SPI) {
		if (bootverbose) {
			xpt_print(periph->path, "not SPI transport\n");
		}
		return (0);
	}
	spi = &cts.xport_specific.spi;

	/*
	 * We cannot renegotiate sync rate if we don't have one.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate known\n");
		}
		return (0);
	}

	/*
	 * We'll assert that we don't have to touch PPR options- the
	 * SIM will see what we do with period and offset and adjust
	 * the PPR options as appropriate.
	 */

	/*
	 * A sync rate with unknown or zero offset is nonsensical.
	 * A sync period of zero means Async.
	 */
	if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
	 || spi->sync_offset == 0 || spi->sync_period == 0) {
		if (bootverbose) {
			xpt_print(periph->path, "no sync rate available\n");
		}
		return (0);
	}

	if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("hit async: giving up on DV\n"));
		return (0);
	}


	/*
	 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
	 * We don't try to remember 'last' settings to see if the SIM actually
	 * gets into the speed we want to set.  We check on the SIM telling
	 * us that a requested speed is bad, but otherwise don't try and
	 * check the speed due to the asynchronous and handshake nature
	 * of speed setting.
	 */
	spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
	for (;;) {
		/* Larger sync_period value == slower transfer rate. */
		spi->sync_period++;
		if (spi->sync_period >= 0xf) {
			spi->sync_period = 0;
			spi->sync_offset = 0;
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("setting to async for DV\n"));
			/*
			 * Once we hit async, we don't want to try
			 * any more settings.
			 */
			device->flags |= CAM_DEV_DV_HIT_BOTTOM;
		} else if (bootverbose) {
			CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
			    ("DV: period 0x%x\n", spi->sync_period));
			printf("setting period to 0x%x\n", spi->sync_period);
		}
		cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
		cts.type = CTS_TYPE_CURRENT_SETTINGS;
		xpt_action((union ccb *)&cts);
		if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			break;
		}
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
		    ("DV: failed to set period 0x%x\n", spi->sync_period));
		if (spi->sync_period == 0) {
			return (0);
		}
	}
	return (1);
}

/*
 * Completion handler for all probe-state commands; advances the probe
 * state machine.  (Definition continues beyond this view.)
 */
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path,
						 /*count*/1,
						 /*run_queue*/TRUE);
1075 } 1076 PROBE_SET_ACTION(softc, PROBE_INQUIRY); 1077 xpt_release_ccb(done_ccb); 1078 xpt_schedule(periph, priority); 1079 return; 1080 } 1081 case PROBE_INQUIRY: 1082 case PROBE_FULL_INQUIRY: 1083 { 1084 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 1085 struct scsi_inquiry_data *inq_buf; 1086 u_int8_t periph_qual; 1087 1088 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 1089 inq_buf = &path->device->inq_data; 1090 1091 periph_qual = SID_QUAL(inq_buf); 1092 1093 if (periph_qual == SID_QUAL_LU_CONNECTED) { 1094 u_int8_t len; 1095 1096 /* 1097 * We conservatively request only 1098 * SHORT_INQUIRY_LEN bytes of inquiry 1099 * information during our first try 1100 * at sending an INQUIRY. If the device 1101 * has more information to give, 1102 * perform a second request specifying 1103 * the amount of information the device 1104 * is willing to give. 1105 */ 1106 len = inq_buf->additional_length 1107 + offsetof(struct scsi_inquiry_data, 1108 additional_length) + 1; 1109 if (softc->action == PROBE_INQUIRY 1110 && len > SHORT_INQUIRY_LENGTH) { 1111 PROBE_SET_ACTION(softc, PROBE_FULL_INQUIRY); 1112 xpt_release_ccb(done_ccb); 1113 xpt_schedule(periph, priority); 1114 return; 1115 } 1116 1117 scsi_find_quirk(path->device); 1118 1119 scsi_devise_transport(path); 1120 1121 if (path->device->lun_id == 0 && 1122 SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && 1123 (SCSI_QUIRK(path->device)->quirks & 1124 CAM_QUIRK_NORPTLUNS) == 0) { 1125 PROBE_SET_ACTION(softc, 1126 PROBE_REPORT_LUNS); 1127 /* 1128 * Start with room for *one* lun. 
1129 */ 1130 periph->path->target->rpl_size = 16; 1131 } else if (INQ_DATA_TQ_ENABLED(inq_buf)) 1132 PROBE_SET_ACTION(softc, 1133 PROBE_MODE_SENSE); 1134 else 1135 PROBE_SET_ACTION(softc, 1136 PROBE_SERIAL_NUM_0); 1137 1138 if (path->device->flags & CAM_DEV_UNCONFIGURED) { 1139 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 1140 xpt_acquire_device(path->device); 1141 } 1142 xpt_release_ccb(done_ccb); 1143 xpt_schedule(periph, priority); 1144 return; 1145 } else if (path->device->lun_id == 0 && 1146 SID_ANSI_REV(inq_buf) > SCSI_REV_SPC2 && 1147 (SCSI_QUIRK(path->device)->quirks & 1148 CAM_QUIRK_NORPTLUNS) == 0) { 1149 if (path->device->flags & 1150 CAM_DEV_UNCONFIGURED) { 1151 path->device->flags &= 1152 ~CAM_DEV_UNCONFIGURED; 1153 xpt_acquire_device(path->device); 1154 } 1155 PROBE_SET_ACTION(softc, PROBE_REPORT_LUNS); 1156 periph->path->target->rpl_size = 16; 1157 xpt_release_ccb(done_ccb); 1158 xpt_schedule(periph, priority); 1159 return; 1160 } 1161 } else if (cam_periph_error(done_ccb, 0, 1162 done_ccb->ccb_h.target_lun > 0 1163 ? SF_RETRY_UA|SF_QUIET_IR 1164 : SF_RETRY_UA, 1165 &softc->saved_ccb) == ERESTART) { 1166 return; 1167 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1168 /* Don't wedge the queue */ 1169 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 1170 /*run_queue*/TRUE); 1171 } 1172 /* 1173 * If we get to this point, we got an error status back 1174 * from the inquiry and the error status doesn't require 1175 * automatically retrying the command. Therefore, the 1176 * inquiry failed. If we had inquiry information before 1177 * for this device, but this latest inquiry command failed, 1178 * the device has probably gone away. If this device isn't 1179 * already marked unconfigured, notify the peripheral 1180 * drivers that this device is no more. 1181 */ 1182 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 1183 /* Send the async notification. 
*/ 1184 xpt_async(AC_LOST_DEVICE, path, NULL); 1185 1186 xpt_release_ccb(done_ccb); 1187 break; 1188 } 1189 case PROBE_REPORT_LUNS: 1190 { 1191 struct ccb_scsiio *csio; 1192 struct scsi_report_luns_data *lp; 1193 u_int nlun, maxlun; 1194 1195 csio = &done_ccb->csio; 1196 1197 lp = (struct scsi_report_luns_data *)csio->data_ptr; 1198 nlun = scsi_4btoul(lp->length) / 8; 1199 maxlun = (csio->dxfer_len / 8) - 1; 1200 1201 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1202 if (cam_periph_error(done_ccb, 0, 1203 done_ccb->ccb_h.target_lun > 0 ? 1204 SF_RETRY_UA|SF_QUIET_IR : SF_RETRY_UA, 1205 &softc->saved_ccb) == ERESTART) { 1206 return; 1207 } 1208 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1209 xpt_release_devq(done_ccb->ccb_h.path, 1, 1210 TRUE); 1211 } 1212 free(lp, M_CAMXPT); 1213 lp = NULL; 1214 } else if (nlun > maxlun) { 1215 /* 1216 * Reallocate and retry to cover all luns 1217 */ 1218 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, path, 1219 ("reallocating REPORT_LUNS for %u luns\n", nlun)); 1220 free(lp, M_CAMXPT); 1221 path->target->rpl_size = (nlun << 3) + 8; 1222 xpt_release_ccb(done_ccb); 1223 xpt_schedule(periph, priority); 1224 return; 1225 } else if (nlun == 0) { 1226 /* 1227 * If there don't appear to be any luns, bail. 1228 */ 1229 free(lp, M_CAMXPT); 1230 lp = NULL; 1231 } else if (probe_strange_rpl_data(lp, maxlun)) { 1232 /* 1233 * If we can't understand the lun format 1234 * of any entry, bail. 1235 */ 1236 free(lp, M_CAMXPT); 1237 lp = NULL; 1238 } else { 1239 lun_id_t lun; 1240 int idx; 1241 1242 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, path, 1243 ("%u luns reported\n", nlun)); 1244 1245 CAM_GET_SIMPLE_LUN(lp, 0, lun); 1246 /* 1247 * If the first lun is not lun 0, then either there 1248 * is no lun 0 in the list, or the list is unsorted. 
1249 */ 1250 if (lun != 0) { 1251 for (idx = 0; idx < nlun; idx++) { 1252 CAM_GET_SIMPLE_LUN(lp, idx, lun); 1253 if (lun == 0) { 1254 break; 1255 } 1256 } 1257 if (idx != nlun) { 1258 uint8_t tlun[8]; 1259 memcpy(tlun, 1260 lp->luns[0].lundata, 8); 1261 memcpy(lp->luns[0].lundata, 1262 lp->luns[idx].lundata, 8); 1263 memcpy(lp->luns[idx].lundata, 1264 tlun, 8); 1265 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, 1266 path, ("lun 0 in position %u\n", idx)); 1267 } else { 1268 /* 1269 * There is no lun 0 in our list. Destroy 1270 * the validity of the inquiry data so we 1271 * bail here and now. 1272 */ 1273 path->device->flags &= 1274 ~CAM_DEV_INQUIRY_DATA_VALID; 1275 } 1276 } 1277 /* 1278 * If we have an old lun list, We can either 1279 * retest luns that appear to have been dropped, 1280 * or just nuke them. We'll opt for the latter. 1281 * This function will also install the new list 1282 * in the target structure. 1283 */ 1284 probe_purge_old(path, lp); 1285 lp = NULL; 1286 } 1287 if (path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) { 1288 struct scsi_inquiry_data *inq_buf; 1289 inq_buf = &path->device->inq_data; 1290 if (INQ_DATA_TQ_ENABLED(inq_buf)) 1291 PROBE_SET_ACTION(softc, PROBE_MODE_SENSE); 1292 else 1293 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0); 1294 xpt_release_ccb(done_ccb); 1295 xpt_schedule(periph, priority); 1296 return; 1297 } 1298 if (lp) { 1299 free(lp, M_CAMXPT); 1300 } 1301 break; 1302 } 1303 case PROBE_MODE_SENSE: 1304 { 1305 struct ccb_scsiio *csio; 1306 struct scsi_mode_header_6 *mode_hdr; 1307 1308 csio = &done_ccb->csio; 1309 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 1310 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 1311 struct scsi_control_page *page; 1312 u_int8_t *offset; 1313 1314 offset = ((u_int8_t *)&mode_hdr[1]) 1315 + mode_hdr->blk_desc_len; 1316 page = (struct scsi_control_page *)offset; 1317 path->device->queue_flags = page->queue_flags; 1318 } else if (cam_periph_error(done_ccb, 0, 1319 
SF_RETRY_UA|SF_NO_PRINT, 1320 &softc->saved_ccb) == ERESTART) { 1321 return; 1322 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1323 /* Don't wedge the queue */ 1324 xpt_release_devq(done_ccb->ccb_h.path, 1325 /*count*/1, /*run_queue*/TRUE); 1326 } 1327 xpt_release_ccb(done_ccb); 1328 free(mode_hdr, M_CAMXPT); 1329 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_0); 1330 xpt_schedule(periph, priority); 1331 return; 1332 } 1333 case PROBE_SERIAL_NUM_0: 1334 { 1335 struct ccb_scsiio *csio; 1336 struct scsi_vpd_supported_page_list *page_list; 1337 int length, serialnum_supported, i; 1338 1339 serialnum_supported = 0; 1340 csio = &done_ccb->csio; 1341 page_list = 1342 (struct scsi_vpd_supported_page_list *)csio->data_ptr; 1343 1344 if (page_list == NULL) { 1345 /* 1346 * Don't process the command as it was never sent 1347 */ 1348 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP 1349 && (page_list->length > 0)) { 1350 length = min(page_list->length, 1351 SVPD_SUPPORTED_PAGES_SIZE); 1352 for (i = 0; i < length; i++) { 1353 if (page_list->list[i] == 1354 SVPD_UNIT_SERIAL_NUMBER) { 1355 serialnum_supported = 1; 1356 break; 1357 } 1358 } 1359 } else if (cam_periph_error(done_ccb, 0, 1360 SF_RETRY_UA|SF_NO_PRINT, 1361 &softc->saved_ccb) == ERESTART) { 1362 return; 1363 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1364 /* Don't wedge the queue */ 1365 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 1366 /*run_queue*/TRUE); 1367 } 1368 1369 if (page_list != NULL) 1370 free(page_list, M_CAMXPT); 1371 1372 if (serialnum_supported) { 1373 xpt_release_ccb(done_ccb); 1374 PROBE_SET_ACTION(softc, PROBE_SERIAL_NUM_1); 1375 xpt_schedule(periph, priority); 1376 return; 1377 } 1378 1379 csio->data_ptr = NULL; 1380 /* FALLTHROUGH */ 1381 } 1382 1383 case PROBE_SERIAL_NUM_1: 1384 { 1385 struct ccb_scsiio *csio; 1386 struct scsi_vpd_unit_serial_number *serial_buf; 1387 u_int32_t priority; 1388 int changed; 1389 int have_serialnum; 1390 1391 changed 
= 1; 1392 have_serialnum = 0; 1393 csio = &done_ccb->csio; 1394 priority = done_ccb->ccb_h.pinfo.priority; 1395 serial_buf = 1396 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 1397 1398 /* Clean up from previous instance of this device */ 1399 if (path->device->serial_num != NULL) { 1400 free(path->device->serial_num, M_CAMXPT); 1401 path->device->serial_num = NULL; 1402 path->device->serial_num_len = 0; 1403 } 1404 1405 if (serial_buf == NULL) { 1406 /* 1407 * Don't process the command as it was never sent 1408 */ 1409 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP 1410 && (serial_buf->length > 0)) { 1411 1412 have_serialnum = 1; 1413 path->device->serial_num = 1414 (u_int8_t *)malloc((serial_buf->length + 1), 1415 M_CAMXPT, M_NOWAIT); 1416 if (path->device->serial_num != NULL) { 1417 bcopy(serial_buf->serial_num, 1418 path->device->serial_num, 1419 serial_buf->length); 1420 path->device->serial_num_len = 1421 serial_buf->length; 1422 path->device->serial_num[serial_buf->length] 1423 = '\0'; 1424 } 1425 } else if (cam_periph_error(done_ccb, 0, 1426 SF_RETRY_UA|SF_NO_PRINT, 1427 &softc->saved_ccb) == ERESTART) { 1428 return; 1429 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1430 /* Don't wedge the queue */ 1431 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 1432 /*run_queue*/TRUE); 1433 } 1434 1435 /* 1436 * Let's see if we have seen this device before. 1437 */ 1438 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { 1439 MD5_CTX context; 1440 u_int8_t digest[16]; 1441 1442 MD5Init(&context); 1443 1444 MD5Update(&context, 1445 (unsigned char *)&path->device->inq_data, 1446 sizeof(struct scsi_inquiry_data)); 1447 1448 if (have_serialnum) 1449 MD5Update(&context, serial_buf->serial_num, 1450 serial_buf->length); 1451 1452 MD5Final(digest, &context); 1453 if (bcmp(softc->digest, digest, 16) == 0) 1454 changed = 0; 1455 1456 /* 1457 * XXX Do we need to do a TUR in order to ensure 1458 * that the device really hasn't changed??? 
1459 */ 1460 if ((changed != 0) 1461 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) 1462 xpt_async(AC_LOST_DEVICE, path, NULL); 1463 } 1464 if (serial_buf != NULL) 1465 free(serial_buf, M_CAMXPT); 1466 1467 if (changed != 0) { 1468 /* 1469 * Now that we have all the necessary 1470 * information to safely perform transfer 1471 * negotiations... Controllers don't perform 1472 * any negotiation or tagged queuing until 1473 * after the first XPT_SET_TRAN_SETTINGS ccb is 1474 * received. So, on a new device, just retrieve 1475 * the user settings, and set them as the current 1476 * settings to set the device up. 1477 */ 1478 proberequestdefaultnegotiation(periph); 1479 xpt_release_ccb(done_ccb); 1480 1481 /* 1482 * Perform a TUR to allow the controller to 1483 * perform any necessary transfer negotiation. 1484 */ 1485 PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); 1486 xpt_schedule(periph, priority); 1487 return; 1488 } 1489 xpt_release_ccb(done_ccb); 1490 break; 1491 } 1492 case PROBE_TUR_FOR_NEGOTIATION: 1493 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1494 DELAY(500000); 1495 if (cam_periph_error(done_ccb, 0, SF_RETRY_UA, 1496 NULL) == ERESTART) 1497 return; 1498 } 1499 /* FALLTHROUGH */ 1500 case PROBE_DV_EXIT: 1501 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1502 /* Don't wedge the queue */ 1503 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 1504 /*run_queue*/TRUE); 1505 } 1506 /* 1507 * Do Domain Validation for lun 0 on devices that claim 1508 * to support Synchronous Transfer modes. 
1509 */ 1510 if (softc->action == PROBE_TUR_FOR_NEGOTIATION 1511 && done_ccb->ccb_h.target_lun == 0 1512 && (path->device->inq_data.flags & SID_Sync) != 0 1513 && (path->device->flags & CAM_DEV_IN_DV) == 0) { 1514 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 1515 ("Begin Domain Validation\n")); 1516 path->device->flags |= CAM_DEV_IN_DV; 1517 xpt_release_ccb(done_ccb); 1518 PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV1); 1519 xpt_schedule(periph, priority); 1520 return; 1521 } 1522 if (softc->action == PROBE_DV_EXIT) { 1523 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 1524 ("Leave Domain Validation\n")); 1525 } 1526 if (path->device->flags & CAM_DEV_UNCONFIGURED) { 1527 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 1528 xpt_acquire_device(path->device); 1529 } 1530 path->device->flags &= 1531 ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); 1532 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 1533 /* Inform the XPT that a new device has been found */ 1534 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 1535 xpt_action(done_ccb); 1536 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, 1537 done_ccb); 1538 } 1539 xpt_release_ccb(done_ccb); 1540 break; 1541 case PROBE_INQUIRY_BASIC_DV1: 1542 case PROBE_INQUIRY_BASIC_DV2: 1543 { 1544 struct scsi_inquiry_data *nbuf; 1545 struct ccb_scsiio *csio; 1546 1547 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1548 /* Don't wedge the queue */ 1549 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 1550 /*run_queue*/TRUE); 1551 } 1552 csio = &done_ccb->csio; 1553 nbuf = (struct scsi_inquiry_data *)csio->data_ptr; 1554 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) { 1555 xpt_print(path, 1556 "inquiry data fails comparison at DV%d step\n", 1557 softc->action == PROBE_INQUIRY_BASIC_DV1 ? 
1 : 2); 1558 if (proberequestbackoff(periph, path->device)) { 1559 path->device->flags &= ~CAM_DEV_IN_DV; 1560 PROBE_SET_ACTION(softc, PROBE_TUR_FOR_NEGOTIATION); 1561 } else { 1562 /* give up */ 1563 PROBE_SET_ACTION(softc, PROBE_DV_EXIT); 1564 } 1565 free(nbuf, M_CAMXPT); 1566 xpt_release_ccb(done_ccb); 1567 xpt_schedule(periph, priority); 1568 return; 1569 } 1570 free(nbuf, M_CAMXPT); 1571 if (softc->action == PROBE_INQUIRY_BASIC_DV1) { 1572 PROBE_SET_ACTION(softc, PROBE_INQUIRY_BASIC_DV2); 1573 xpt_release_ccb(done_ccb); 1574 xpt_schedule(periph, priority); 1575 return; 1576 } 1577 if (softc->action == PROBE_INQUIRY_BASIC_DV2) { 1578 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, 1579 ("Leave Domain Validation Successfully\n")); 1580 } 1581 if (path->device->flags & CAM_DEV_UNCONFIGURED) { 1582 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 1583 xpt_acquire_device(path->device); 1584 } 1585 path->device->flags &= 1586 ~(CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM); 1587 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 1588 /* Inform the XPT that a new device has been found */ 1589 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 1590 xpt_action(done_ccb); 1591 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path, 1592 done_ccb); 1593 } 1594 xpt_release_ccb(done_ccb); 1595 break; 1596 } 1597 case PROBE_INVALID: 1598 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_INFO, 1599 ("probedone: invalid action state\n")); 1600 default: 1601 break; 1602 } 1603 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 1604 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); 1605 done_ccb->ccb_h.status = CAM_REQ_CMP; 1606 xpt_done(done_ccb); 1607 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { 1608 cam_release_devq(periph->path, 1609 RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_XPT + 1, FALSE); 1610 cam_periph_invalidate(periph); 1611 cam_periph_release_locked(periph); 1612 } else { 1613 probeschedule(periph); 1614 } 1615 } 1616 1617 static int 1618 probe_strange_rpl_data(struct 
scsi_report_luns_data *rp, uint32_t maxlun) 1619 { 1620 uint32_t idx; 1621 uint32_t nlun = MIN(maxlun, (scsi_4btoul(rp->length) / 8)); 1622 1623 for (idx = 0; idx < nlun; idx++) { 1624 if (!CAM_CAN_GET_SIMPLE_LUN(rp, idx)) { 1625 return (-1); 1626 } 1627 } 1628 return (0); 1629 } 1630 1631 static void 1632 probe_purge_old(struct cam_path *path, struct scsi_report_luns_data *new) 1633 { 1634 struct cam_path *tp; 1635 struct scsi_report_luns_data *old; 1636 u_int idx1, idx2, nlun_old, nlun_new, this_lun; 1637 u_int8_t *ol, *nl; 1638 1639 if (path->target == NULL) { 1640 return; 1641 } 1642 if (path->target->luns == NULL) { 1643 path->target->luns = new; 1644 return; 1645 } 1646 old = path->target->luns; 1647 nlun_old = scsi_4btoul(old->length) / 8; 1648 nlun_new = scsi_4btoul(new->length) / 8; 1649 1650 /* 1651 * We are not going to assume sorted lists. Deal. 1652 */ 1653 for (idx1 = 0; idx1 < nlun_old; idx1++) { 1654 ol = old->luns[idx1].lundata; 1655 for (idx2 = 0; idx2 < nlun_new; idx2++) { 1656 nl = new->luns[idx2].lundata; 1657 if (memcmp(nl, ol, 8) == 0) { 1658 break; 1659 } 1660 } 1661 if (idx2 < nlun_new) { 1662 continue; 1663 } 1664 /* 1665 * An 'old' item not in the 'new' list. 1666 * Nuke it. Except that if it is lun 0, 1667 * that would be what the probe state 1668 * machine is currently working on, 1669 * so we won't do that. 1670 * 1671 * We also cannot nuke it if it is 1672 * not in a lun format we understand. 
1673 */ 1674 if (!CAM_CAN_GET_SIMPLE_LUN(old, idx1)) { 1675 continue; 1676 } 1677 CAM_GET_SIMPLE_LUN(old, idx1, this_lun); 1678 if (this_lun == 0) { 1679 continue; 1680 } 1681 if (xpt_create_path(&tp, NULL, xpt_path_path_id(path), 1682 xpt_path_target_id(path), this_lun) == CAM_REQ_CMP) { 1683 xpt_async(AC_LOST_DEVICE, tp, NULL); 1684 xpt_free_path(tp); 1685 } 1686 } 1687 free(old, M_CAMXPT); 1688 path->target->luns = new; 1689 } 1690 1691 static void 1692 probecleanup(struct cam_periph *periph) 1693 { 1694 free(periph->softc, M_CAMXPT); 1695 } 1696 1697 static void 1698 scsi_find_quirk(struct cam_ed *device) 1699 { 1700 struct scsi_quirk_entry *quirk; 1701 caddr_t match; 1702 1703 match = cam_quirkmatch((caddr_t)&device->inq_data, 1704 (caddr_t)scsi_quirk_table, 1705 sizeof(scsi_quirk_table) / 1706 sizeof(*scsi_quirk_table), 1707 sizeof(*scsi_quirk_table), scsi_inquiry_match); 1708 1709 if (match == NULL) 1710 panic("xpt_find_quirk: device didn't match wildcard entry!!"); 1711 1712 quirk = (struct scsi_quirk_entry *)match; 1713 device->quirk = quirk; 1714 device->mintags = quirk->mintags; 1715 device->maxtags = quirk->maxtags; 1716 } 1717 1718 static int 1719 sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS) 1720 { 1721 int error, bool; 1722 1723 bool = cam_srch_hi; 1724 error = sysctl_handle_int(oidp, &bool, 0, req); 1725 if (error != 0 || req->newptr == NULL) 1726 return (error); 1727 if (bool == 0 || bool == 1) { 1728 cam_srch_hi = bool; 1729 return (0); 1730 } else { 1731 return (EINVAL); 1732 } 1733 } 1734 1735 typedef struct { 1736 union ccb *request_ccb; 1737 struct ccb_pathinq *cpi; 1738 int counter; 1739 int lunindex[0]; 1740 } scsi_scan_bus_info; 1741 1742 /* 1743 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 1744 * As the scan progresses, scsi_scan_bus is used as the 1745 * callback on completion function. 
1746 */ 1747 static void 1748 scsi_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 1749 { 1750 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 1751 ("scsi_scan_bus\n")); 1752 switch (request_ccb->ccb_h.func_code) { 1753 case XPT_SCAN_BUS: 1754 case XPT_SCAN_TGT: 1755 { 1756 scsi_scan_bus_info *scan_info; 1757 union ccb *work_ccb, *reset_ccb; 1758 struct cam_path *path; 1759 u_int i; 1760 u_int low_target, max_target; 1761 u_int initiator_id; 1762 1763 /* Find out the characteristics of the bus */ 1764 work_ccb = xpt_alloc_ccb_nowait(); 1765 if (work_ccb == NULL) { 1766 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1767 xpt_done(request_ccb); 1768 return; 1769 } 1770 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 1771 request_ccb->ccb_h.pinfo.priority); 1772 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 1773 xpt_action(work_ccb); 1774 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 1775 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 1776 xpt_free_ccb(work_ccb); 1777 xpt_done(request_ccb); 1778 return; 1779 } 1780 1781 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 1782 /* 1783 * Can't scan the bus on an adapter that 1784 * cannot perform the initiator role. 1785 */ 1786 request_ccb->ccb_h.status = CAM_REQ_CMP; 1787 xpt_free_ccb(work_ccb); 1788 xpt_done(request_ccb); 1789 return; 1790 } 1791 1792 /* We may need to reset bus first, if we haven't done it yet. 
*/ 1793 if ((work_ccb->cpi.hba_inquiry & 1794 (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) && 1795 !(work_ccb->cpi.hba_misc & PIM_NOBUSRESET) && 1796 !timevalisset(&request_ccb->ccb_h.path->bus->last_reset)) { 1797 reset_ccb = xpt_alloc_ccb_nowait(); 1798 xpt_setup_ccb(&reset_ccb->ccb_h, request_ccb->ccb_h.path, 1799 CAM_PRIORITY_NONE); 1800 reset_ccb->ccb_h.func_code = XPT_RESET_BUS; 1801 xpt_action(reset_ccb); 1802 if (reset_ccb->ccb_h.status != CAM_REQ_CMP) { 1803 request_ccb->ccb_h.status = reset_ccb->ccb_h.status; 1804 xpt_free_ccb(reset_ccb); 1805 xpt_free_ccb(work_ccb); 1806 xpt_done(request_ccb); 1807 return; 1808 } 1809 xpt_free_ccb(reset_ccb); 1810 } 1811 1812 /* Save some state for use while we probe for devices */ 1813 scan_info = (scsi_scan_bus_info *) malloc(sizeof(scsi_scan_bus_info) + 1814 (work_ccb->cpi.max_target * sizeof (u_int)), M_CAMXPT, M_ZERO|M_NOWAIT); 1815 if (scan_info == NULL) { 1816 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1817 xpt_done(request_ccb); 1818 return; 1819 } 1820 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, request_ccb->ccb_h.path, 1821 ("SCAN start for %p\n", scan_info)); 1822 scan_info->request_ccb = request_ccb; 1823 scan_info->cpi = &work_ccb->cpi; 1824 1825 /* Cache on our stack so we can work asynchronously */ 1826 max_target = scan_info->cpi->max_target; 1827 low_target = 0; 1828 initiator_id = scan_info->cpi->initiator_id; 1829 1830 1831 /* 1832 * We can scan all targets in parallel, or do it sequentially. 
1833 */ 1834 1835 if (request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { 1836 max_target = low_target = request_ccb->ccb_h.target_id; 1837 scan_info->counter = 0; 1838 } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { 1839 max_target = 0; 1840 scan_info->counter = 0; 1841 } else { 1842 scan_info->counter = scan_info->cpi->max_target + 1; 1843 if (scan_info->cpi->initiator_id < scan_info->counter) { 1844 scan_info->counter--; 1845 } 1846 } 1847 1848 for (i = low_target; i <= max_target; i++) { 1849 cam_status status; 1850 if (i == initiator_id) 1851 continue; 1852 1853 status = xpt_create_path(&path, xpt_periph, 1854 request_ccb->ccb_h.path_id, 1855 i, 0); 1856 if (status != CAM_REQ_CMP) { 1857 printf("scsi_scan_bus: xpt_create_path failed" 1858 " with status %#x, bus scan halted\n", 1859 status); 1860 free(scan_info, M_CAMXPT); 1861 request_ccb->ccb_h.status = status; 1862 xpt_free_ccb(work_ccb); 1863 xpt_done(request_ccb); 1864 break; 1865 } 1866 work_ccb = xpt_alloc_ccb_nowait(); 1867 if (work_ccb == NULL) { 1868 xpt_free_ccb((union ccb *)scan_info->cpi); 1869 free(scan_info, M_CAMXPT); 1870 xpt_free_path(path); 1871 request_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 1872 xpt_done(request_ccb); 1873 break; 1874 } 1875 xpt_setup_ccb(&work_ccb->ccb_h, path, 1876 request_ccb->ccb_h.pinfo.priority); 1877 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 1878 work_ccb->ccb_h.cbfcnp = scsi_scan_bus; 1879 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 1880 work_ccb->crcn.flags = request_ccb->crcn.flags; 1881 xpt_action(work_ccb); 1882 } 1883 break; 1884 } 1885 case XPT_SCAN_LUN: 1886 { 1887 cam_status status; 1888 struct cam_path *path, *oldpath; 1889 scsi_scan_bus_info *scan_info; 1890 struct cam_et *target; 1891 struct cam_ed *device; 1892 int next_target; 1893 path_id_t path_id; 1894 target_id_t target_id; 1895 lun_id_t lun_id; 1896 1897 oldpath = request_ccb->ccb_h.path; 1898 1899 status = request_ccb->ccb_h.status & CAM_STATUS_MASK; 1900 /* Reuse the same CCB to query if a device was 
really found */ 1901 scan_info = (scsi_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 1902 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 1903 request_ccb->ccb_h.pinfo.priority); 1904 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 1905 1906 1907 path_id = request_ccb->ccb_h.path_id; 1908 target_id = request_ccb->ccb_h.target_id; 1909 lun_id = request_ccb->ccb_h.target_lun; 1910 xpt_action(request_ccb); 1911 1912 target = request_ccb->ccb_h.path->target; 1913 next_target = 1; 1914 1915 if (target->luns) { 1916 uint32_t first; 1917 u_int nluns = scsi_4btoul(target->luns->length) / 8; 1918 1919 /* 1920 * Make sure we skip over lun 0 if it's the first member 1921 * of the list as we've actually just finished probing 1922 * it. 1923 */ 1924 CAM_GET_SIMPLE_LUN(target->luns, 0, first); 1925 if (first == 0 && scan_info->lunindex[target_id] == 0) { 1926 scan_info->lunindex[target_id]++; 1927 } 1928 1929 if (scan_info->lunindex[target_id] < nluns) { 1930 CAM_GET_SIMPLE_LUN(target->luns, 1931 scan_info->lunindex[target_id], lun_id); 1932 next_target = 0; 1933 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, 1934 request_ccb->ccb_h.path, 1935 ("next lun to try at index %u is %u\n", 1936 scan_info->lunindex[target_id], lun_id)); 1937 scan_info->lunindex[target_id]++; 1938 } else { 1939 /* 1940 * We're done with scanning all luns. 1941 * 1942 * Nuke the bogus device for lun 0 if lun 0 1943 * wasn't on the list. 1944 */ 1945 if (first != 0) { 1946 TAILQ_FOREACH(device, 1947 &target->ed_entries, links) { 1948 if (device->lun_id == 0) { 1949 break; 1950 } 1951 } 1952 if (device) { 1953 xpt_release_device(device); 1954 } 1955 } 1956 } 1957 } else if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 1958 int phl; 1959 1960 /* 1961 * If we already probed lun 0 successfully, or 1962 * we have additional configured luns on this 1963 * target that might have "gone away", go onto 1964 * the next lun. 
1965 */ 1966 /* 1967 * We may touch devices that we don't 1968 * hold references too, so ensure they 1969 * don't disappear out from under us. 1970 * The target above is referenced by the 1971 * path in the request ccb. 1972 */ 1973 phl = 0; 1974 device = TAILQ_FIRST(&target->ed_entries); 1975 if (device != NULL) { 1976 phl = CAN_SRCH_HI_SPARSE(device); 1977 if (device->lun_id == 0) 1978 device = TAILQ_NEXT(device, links); 1979 } 1980 if ((lun_id != 0) || (device != NULL)) { 1981 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) { 1982 lun_id++; 1983 next_target = 0; 1984 } 1985 } 1986 if (lun_id == request_ccb->ccb_h.target_lun 1987 || lun_id > scan_info->cpi->max_lun) 1988 next_target = 1; 1989 } else { 1990 1991 device = request_ccb->ccb_h.path->device; 1992 1993 if ((SCSI_QUIRK(device)->quirks & 1994 CAM_QUIRK_NOLUNS) == 0) { 1995 /* Try the next lun */ 1996 if (lun_id < (CAM_SCSI2_MAXLUN-1) 1997 || CAN_SRCH_HI_DENSE(device)) { 1998 lun_id++; 1999 next_target = 0; 2000 } 2001 } 2002 if (lun_id == request_ccb->ccb_h.target_lun 2003 || lun_id > scan_info->cpi->max_lun) 2004 next_target = 1; 2005 } 2006 2007 /* 2008 * Check to see if we scan any further luns. 2009 */ 2010 if (next_target) { 2011 int done; 2012 2013 /* 2014 * Free the current request path- we're done with it. 
2015 */ 2016 xpt_free_path(oldpath); 2017 hop_again: 2018 done = 0; 2019 if (scan_info->request_ccb->ccb_h.func_code == XPT_SCAN_TGT) { 2020 done = 1; 2021 } else if (scan_info->cpi->hba_misc & PIM_SEQSCAN) { 2022 scan_info->counter++; 2023 if (scan_info->counter == 2024 scan_info->cpi->initiator_id) { 2025 scan_info->counter++; 2026 } 2027 if (scan_info->counter >= 2028 scan_info->cpi->max_target+1) { 2029 done = 1; 2030 } 2031 } else { 2032 scan_info->counter--; 2033 if (scan_info->counter == 0) { 2034 done = 1; 2035 } 2036 } 2037 if (done) { 2038 xpt_free_ccb(request_ccb); 2039 xpt_free_ccb((union ccb *)scan_info->cpi); 2040 request_ccb = scan_info->request_ccb; 2041 CAM_DEBUG_PATH_PRINT(CAM_DEBUG_PROBE, 2042 request_ccb->ccb_h.path, 2043 ("SCAN done for %p\n", scan_info)); 2044 free(scan_info, M_CAMXPT); 2045 request_ccb->ccb_h.status = CAM_REQ_CMP; 2046 xpt_done(request_ccb); 2047 break; 2048 } 2049 2050 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) { 2051 xpt_free_ccb(request_ccb); 2052 break; 2053 } 2054 status = xpt_create_path(&path, xpt_periph, 2055 scan_info->request_ccb->ccb_h.path_id, 2056 scan_info->counter, 0); 2057 if (status != CAM_REQ_CMP) { 2058 printf("scsi_scan_bus: xpt_create_path failed" 2059 " with status %#x, bus scan halted\n", 2060 status); 2061 xpt_free_ccb(request_ccb); 2062 xpt_free_ccb((union ccb *)scan_info->cpi); 2063 request_ccb = scan_info->request_ccb; 2064 free(scan_info, M_CAMXPT); 2065 request_ccb->ccb_h.status = status; 2066 xpt_done(request_ccb); 2067 break; 2068 } 2069 xpt_setup_ccb(&request_ccb->ccb_h, path, 2070 request_ccb->ccb_h.pinfo.priority); 2071 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 2072 request_ccb->ccb_h.cbfcnp = scsi_scan_bus; 2073 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 2074 request_ccb->crcn.flags = 2075 scan_info->request_ccb->crcn.flags; 2076 } else { 2077 status = xpt_create_path(&path, xpt_periph, 2078 path_id, target_id, lun_id); 2079 /* 2080 * Free the old request path- we're done with it. 
We
				 * do this *after* creating the new path so that
				 * we don't remove a target that has our lun list
				 * in the case that lun 0 is not present.
				 */
				xpt_free_path(oldpath);
				if (status != CAM_REQ_CMP) {
					printf("scsi_scan_bus: xpt_create_path failed "
					       "with status %#x, halting LUN scan\n",
					       status);
					goto hop_again;
				}
				xpt_setup_ccb(&request_ccb->ccb_h, path,
					      request_ccb->ccb_h.pinfo.priority);
				request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
				request_ccb->ccb_h.cbfcnp = scsi_scan_bus;
				request_ccb->ccb_h.ppriv_ptr0 = scan_info;
				request_ccb->crcn.flags =
					scan_info->request_ccb->crcn.flags;
			}
			xpt_action(request_ccb);
			break;
		}
	default:
		break;
	}
}

/*
 * Kick off a probe of a single LUN.  If request_ccb is NULL this is an
 * internally generated scan: a CCB and a wired path are allocated here
 * and later released in xptscandone().  If a "probe" periph already
 * exists for the path, the request is queued behind the probe in
 * progress; otherwise a new probe periph is allocated to service it.
 */
static void
scsi_scan_lun(struct cam_periph *periph, struct cam_path *path,
	      cam_flags flags, union ccb *request_ccb)
{
	struct ccb_pathinq cpi;
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("scsi_scan_lun\n"));

	/* Query the SIM's capabilities for this path. */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	if (cpi.ccb_h.status != CAM_REQ_CMP) {
		/* Propagate the path-inquiry failure to the requester. */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = cpi.ccb_h.status;
			xpt_done(request_ccb);
		}
		return;
	}

	if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
		/*
		 * Can't scan the bus on an adapter that
		 * cannot perform the initiator role.
		 */
		if (request_ccb != NULL) {
			request_ccb->ccb_h.status = CAM_REQ_CMP;
			xpt_done(request_ccb);
		}
		return;
	}

	if (request_ccb == NULL) {
		/*
		 * Internally initiated scan: allocate our own CCB and
		 * path.  Both are freed by xptscandone() on completion.
		 */
		request_ccb = malloc(sizeof(union ccb), M_CAMXPT, M_NOWAIT);
		if (request_ccb == NULL) {
			xpt_print(path, "scsi_scan_lun: can't allocate CCB, "
			    "can't continue\n");
			return;
		}
		new_path = malloc(sizeof(*new_path), M_CAMXPT, M_NOWAIT);
		if (new_path == NULL) {
			xpt_print(path, "scsi_scan_lun: can't allocate path, "
			    "can't continue\n");
			free(request_ccb, M_CAMXPT);
			return;
		}
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "scsi_scan_lun: can't compile path, "
			    "can't continue\n");
			free(request_ccb, M_CAMXPT);
			free(new_path, M_CAMXPT);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, CAM_PRIORITY_XPT);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		/* A probe is already in flight; queue behind it. */
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print(path, "scsi_scan_lun: cam_alloc_periph "
			    "returned an error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
}

/*
 * Completion handler for internally generated LUN scans started by
 * scsi_scan_lun(): releases and frees the path and CCB allocated there.
 */
static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	xpt_release_path(done_ccb->ccb_h.path);
	free(done_ccb->ccb_h.path, M_CAMXPT);
	free(done_ccb, M_CAMXPT);
}

/*
 * Allocate a new device node for the given target/LUN, install the
 * default (last) quirk-table entry until real inquiry data is available,
 * and insert the device into the target's LUN-sorted device list.
 * Returns NULL if xpt_alloc_device() fails.
 */
static struct cam_ed *
scsi_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_path path;
	struct scsi_quirk_entry *quirk;
	struct cam_ed *device;
	struct cam_ed *cur_device;

	device = xpt_alloc_device(bus, target, lun_id);
	if (device == NULL)
		return (NULL);

	/*
	 * Take the default quirk entry until we have inquiry
	 * data and can determine a better quirk to use.
	 */
	quirk = &scsi_quirk_table[scsi_quirk_table_size - 1];
	device->quirk = (void *)quirk;
	device->mintags = quirk->mintags;
	device->maxtags = quirk->maxtags;
	bzero(&device->inq_data, sizeof(device->inq_data));
	device->inq_flags = 0;
	device->queue_flags = 0;
	device->serial_num = NULL;
	device->serial_num_len = 0;

	/*
	 * XXX should be limited by number of CCBs this bus can
	 * do.
	 */
	bus->sim->max_ccbs += device->ccbq.devq_openings;
	/* Insertion sort into our target's device list */
	cur_device = TAILQ_FIRST(&target->ed_entries);
	while (cur_device != NULL && cur_device->lun_id < lun_id)
		cur_device = TAILQ_NEXT(cur_device, links);
	if (cur_device != NULL) {
		TAILQ_INSERT_BEFORE(cur_device, device, links);
	} else {
		TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
	}
	target->generation++;
	if (lun_id != CAM_LUN_WILDCARD) {
		/* Build a temporary path just to seed transport settings. */
		xpt_compile_path(&path,
				 NULL,
				 bus->path_id,
				 target->target_id,
				 lun_id);
		scsi_devise_transport(&path);
		xpt_release_path(&path);
	}

	return (device);
}

/*
 * Determine the protocol/transport identity and versions for a device,
 * preferring the device's inquiry data when valid and falling back to
 * the SIM's path-inquiry values, then push the result to the SIM via
 * XPT_SET_TRAN_SETTINGS.
 */
static void
scsi_devise_transport(struct cam_path *path)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct scsi_inquiry_data *inq_buf;

	/* Get transport information from the SIM */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* Use inquiry data, when valid, in preference to SIM defaults. */
	inq_buf = NULL;
	if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
		inq_buf = &path->device->inq_data;
	path->device->protocol = PROTO_SCSI;
	path->device->protocol_version =
	    inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
	path->device->transport = cpi.transport;
	path->device->transport_version = cpi.transport_version;

	/*
	 * Any device not using SPI3 features should
	 * be considered SPI2 or lower.
	 */
	if (inq_buf != NULL) {
		if (path->device->transport == XPORT_SPI
		 && (inq_buf->spi3data & SID_SPI_MASK) == 0
		 && path->device->transport_version > 2)
			path->device->transport_version = 2;
	} else {
		struct cam_ed* otherdev;

		/* Find any other LUN already known on this target. */
		for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
		     otherdev != NULL;
		     otherdev = TAILQ_NEXT(otherdev, links)) {
			if (otherdev != path->device)
				break;
		}

		if (otherdev != NULL) {
			/*
			 * Initially assume the same versioning as
			 * prior luns for this target.
			 */
			path->device->protocol_version =
			    otherdev->protocol_version;
			path->device->transport_version =
			    otherdev->transport_version;
		} else {
			/* Until we know better, opt for safety */
			path->device->protocol_version = 2;
			if (path->device->transport == XPORT_SPI)
				path->device->transport_version = 2;
			else
				path->device->transport_version = 0;
		}
	}

	/*
	 * XXX
	 * For a device compliant with SPC-2 we should be able
	 * to determine the transport version supported by
	 * scrutinizing the version descriptors in the
	 * inquiry buffer.
	 */

	/* Tell the controller what we think */
	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cts.transport = path->device->transport;
	cts.transport_version = path->device->transport_version;
	cts.protocol = path->device->protocol;
	cts.protocol_version = path->device->protocol_version;
	cts.proto_specific.valid = 0;
	cts.xport_specific.valid = 0;
	xpt_action((union ccb *)&cts);
}

/*
 * SCSI protocol-specific XPT action entry point.  Routes transfer-setting
 * and scan CCBs to the SCSI implementations above; GET_TRAN_SETTINGS goes
 * straight to the SIM; everything else falls back to xpt_action_default().
 */
static void
scsi_action(union ccb *start_ccb)
{

	switch (start_ccb->ccb_h.func_code) {
	case XPT_SET_TRAN_SETTINGS:
	{
		scsi_set_transfer_settings(&start_ccb->cts,
					   start_ccb->ccb_h.path->device,
					   /*async_update*/FALSE);
		break;
	}
	case XPT_SCAN_BUS:
	case XPT_SCAN_TGT:
		scsi_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
		break;
	case XPT_SCAN_LUN:
		scsi_scan_lun(start_ccb->ccb_h.path->periph,
			      start_ccb->ccb_h.path, start_ccb->crcn.flags,
			      start_ccb);
		break;
	case XPT_GET_TRAN_SETTINGS:
	{
		struct cam_sim *sim;

		/* Pass the query straight through to the SIM. */
		sim = start_ccb->ccb_h.path->bus->sim;
		(*(sim->sim_action))(sim, start_ccb);
		break;
	}
	default:
		xpt_action_default(start_ccb);
		break;
	}
}

/*
 * Validate and, when async_update is FALSE, apply a set of transfer
 * settings for a device.  Unknown/unspecified protocol and transport
 * identities are filled in from the device; requested versions are
 * clamped to what the device supports.  SCSI- and SPI-specific sanity
 * checks are applied before the settings are handed to the SIM.
 * When async_update is TRUE the settings are only massaged in place
 * (caller-driven update; the SIM is not invoked).
 */
static void
scsi_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			   int async_update)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cur_cts;
	struct ccb_trans_settings_scsi *scsi;
	struct ccb_trans_settings_scsi *cur_scsi;
	struct cam_sim *sim;
	struct scsi_inquiry_data *inq_data;

	if (device == NULL) {
		cts->ccb_h.status = CAM_PATH_INVALID;
		xpt_done((union ccb *)cts);
		return;
	}

	/* Fill in the protocol identity if the caller left it unset. */
	if (cts->protocol == PROTO_UNKNOWN
	 || cts->protocol == PROTO_UNSPECIFIED) {
		cts->protocol = device->protocol;
		cts->protocol_version = device->protocol_version;
	}

	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
		cts->protocol_version = device->protocol_version;

	if (cts->protocol != device->protocol) {
		xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
			  cts->protocol, device->protocol);
		cts->protocol = device->protocol;
	}

	/* Never advertise a protocol version beyond what the device has. */
	if (cts->protocol_version > device->protocol_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Protocol "
				  "Version from %d to %d?\n", cts->protocol_version,
				  device->protocol_version);
		}
		cts->protocol_version = device->protocol_version;
	}

	/* Same treatment for the transport identity and version. */
	if (cts->transport == XPORT_UNKNOWN
	 || cts->transport == XPORT_UNSPECIFIED) {
		cts->transport = device->transport;
		cts->transport_version = device->transport_version;
	}

	if (cts->transport_version == XPORT_VERSION_UNKNOWN
	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
		cts->transport_version = device->transport_version;

	if (cts->transport != device->transport) {
		xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
			  cts->transport, device->transport);
		cts->transport = device->transport;
	}

	if (cts->transport_version > device->transport_version) {
		if (bootverbose) {
			xpt_print(cts->ccb_h.path, "Down reving Transport "
				  "Version from %d to %d?\n", cts->transport_version,
				  device->transport_version);
		}
		cts->transport_version = device->transport_version;
	}

	sim = cts->ccb_h.path->bus->sim;

	/*
	 * Nothing more of interest to do unless
	 * this is a device connected via the
	 * SCSI protocol.
	 */
	if (cts->protocol != PROTO_SCSI) {
		if (async_update == FALSE)
			(*(sim->sim_action))(sim, (union ccb *)cts);
		return;
	}

	inq_data = &device->inq_data;
	scsi = &cts->proto_specific.scsi;
	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	/* SCSI specific sanity checking */
	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
	 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
	 || (device->mintags == 0)) {
		/*
		 * Can't tag on hardware that doesn't support tags,
		 * doesn't have it enabled, or has broken tag support.
		 */
		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	if (async_update == FALSE) {
		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, CAM_PRIORITY_NONE);
		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
		cur_cts.type = cts->type;
		xpt_action((union ccb *)&cur_cts);
		if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			return;
		}
		cur_scsi = &cur_cts.proto_specific.scsi;
		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
			/* Caller didn't specify TQ; inherit current state. */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
		}
		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
	}

	/* SPI specific sanity checking */
	if (cts->transport == XPORT_SPI && async_update == FALSE) {
		u_int spi3caps;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings_spi *cur_spi;

		spi = &cts->xport_specific.spi;

		/* cur_cts was populated in the async_update==FALSE block above. */
		cur_spi = &cur_cts.xport_specific.spi;

		/* Fill in any gaps in what the user gave us */
		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = cur_spi->sync_period;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
			spi->sync_period = 0;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = cur_spi->sync_offset;
		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
			spi->sync_offset = 0;
		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = cur_spi->ppr_options;
		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
			spi->ppr_options = 0;
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = cur_spi->bus_width;
		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
			spi->bus_width = 0;
		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
		}
		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		  && (inq_data->flags & SID_Sync) == 0
		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)) {
			/* Force async */
			spi->sync_period = 0;
			spi->sync_offset = 0;
		}

		/* Clamp bus width to what device and controller support. */
		switch (spi->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus32) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
			  || (inq_data->flags & SID_WBus16) != 0
			  || cts->type == CTS_TYPE_USER_SETTINGS)
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width??
			  */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		/* Strip PPR options neither side is capable of. */
		spi3caps = cpi.xport_specific.spi.ppr_options;
		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
			spi3caps &= inq_data->spi3data;

		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;

		if ((spi3caps & SID_SPI_IUS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;

		if ((spi3caps & SID_SPI_QAS) == 0)
			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;

		/* No SPI Transfer settings are allowed unless we are wide */
		if (spi->bus_width == 0)
			spi->ppr_options = 0;

		if ((spi->valid & CTS_SPI_VALID_DISC)
		 && ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0)) {
			/*
			 * Can't tag queue without disconnection.
			 */
			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
			scsi->valid |= CTS_SCSI_VALID_TQ;
		}

		/*
		 * If we are currently performing tagged transactions to
		 * this device and want to change its negotiation parameters,
		 * go non-tagged for a bit to give the controller a chance to
		 * negotiate unhampered by tag messages.
		 *
		 * NOTE(review): the mask below tests CTS_SPI_VALID_* bits
		 * against spi->flags rather than spi->valid; looks
		 * suspicious — confirm against the CTS_SPI_* definitions
		 * before changing, since the behavior is long-standing.
		 */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
		 && (device->inq_flags & SID_CmdQue) != 0
		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
				   CTS_SPI_VALID_SYNC_OFFSET|
				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
			scsi_toggle_tags(cts->ccb_h.path);
	}

	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.
We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		/* Only act on an actual tagged <-> untagged transition. */
		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
				/*
				 * Delay change to use tags until after a
				 * few commands have gone to this device so
				 * the controller has time to perform transfer
				 * negotiations without tagged messages getting
				 * in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				xpt_stop_tags(cts->ccb_h.path);
			}
		}
	}
	/* Synchronous update: hand the final settings to the SIM. */
	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);
}

/*
 * Toggle tagged queuing off and then back on for a device so the
 * controller gets a tag-free window in which to renegotiate transfer
 * settings.  Both updates are async (settings massaged only; the SIM is
 * not invoked by scsi_set_transfer_settings here).
 */
static void
scsi_toggle_tags(struct cam_path *path)
{
	struct cam_ed *dev;

	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	dev = path->device;
	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || ((dev->inq_flags & SID_CmdQue) != 0
	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NONE);
		cts.protocol = PROTO_SCSI;
		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts.transport = XPORT_UNSPECIFIED;
		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
		/* First pass: tags off ... */
		cts.proto_specific.scsi.flags = 0;
		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
		scsi_set_transfer_settings(&cts, path->device,
					   /*async_update*/TRUE);
		/* ... second pass: tags back on. */
		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
		scsi_set_transfer_settings(&cts, path->device,
					   /*async_update*/TRUE);
	}
}

/*
 * Handle any per-device event notifications that require action by the XPT.
 */
static void
scsi_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
	       struct cam_ed *device, void *async_arg)
{
	cam_status status;
	struct cam_path newpath;

	/*
	 * We only need to handle events for real devices.
	 */
	if (target->target_id == CAM_TARGET_WILDCARD
	 || device->lun_id == CAM_LUN_WILDCARD)
		return;

	/*
	 * We need our own path with wildcards expanded to
	 * handle certain types of events.
	 */
	if ((async_code == AC_SENT_BDR)
	 || (async_code == AC_BUS_RESET)
	 || (async_code == AC_INQ_CHANGED))
		status = xpt_compile_path(&newpath, NULL,
					  bus->path_id,
					  target->target_id,
					  device->lun_id);
	else
		status = CAM_REQ_CMP_ERR;

	if (status == CAM_REQ_CMP) {

		/*
		 * Allow transfer negotiation to occur in a
		 * tag free environment and after settle delay.
		 */
		if (async_code == AC_SENT_BDR
		 || async_code == AC_BUS_RESET) {
			/* Freeze, then release after the scsi_delay settle. */
			cam_freeze_devq(&newpath);
			cam_release_devq(&newpath,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/scsi_delay,
				/*getcount_only*/0);
			scsi_toggle_tags(&newpath);
		}

		if (async_code == AC_INQ_CHANGED) {
			/*
			 * We've sent a start unit command, or
			 * something similar to a device that
			 * may have caused its inquiry data to
			 * change. So we re-scan the device to
			 * refresh the inquiry data for it.
			 */
			scsi_scan_lun(newpath.periph, &newpath,
				      CAM_EXPECT_INQ_CHANGE, NULL);
		}
		xpt_release_path(&newpath);
	} else if (async_code == AC_LOST_DEVICE &&
	    (device->flags & CAM_DEV_UNCONFIGURED) == 0) {
		device->flags |= CAM_DEV_UNCONFIGURED;
		xpt_release_device(device);
	} else if (async_code == AC_TRANSFER_NEG) {
		struct ccb_trans_settings *settings;

		settings = (struct ccb_trans_settings *)async_arg;
		scsi_set_transfer_settings(settings, device,
					   /*async_update*/TRUE);
	}
}

/*
 * Print the negotiated transfer speed and transport-specific details
 * (SPI sync/offset/width, FC WWNs and port ID, SAS rate) for a periph.
 * Must be called with the SIM mutex held (asserted below).
 */
static void
scsi_announce_periph(struct cam_periph *periph)
{
	struct ccb_pathinq cpi;
	struct ccb_trans_settings cts;
	struct cam_path *path = periph->path;
	u_int speed;
	u_int freq;
	u_int mb;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	xpt_setup_ccb(&cts.ccb_h, path, CAM_PRIORITY_NORMAL);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return;
	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	/* Report connection speed */
	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct ccb_trans_settings_spi *spi =
		    &cts.xport_specific.spi;

		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc =
		    &cts.xport_specific.fc;

		if (fc->valid & CTS_FC_VALID_SPEED)
			speed = fc->bitrate;
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
		struct ccb_trans_settings_sas *sas =
		    &cts.xport_specific.sas;

		if (sas->valid & CTS_SAS_VALID_SPEED)
			speed = sas->bitrate;
	}
	/* speed is treated as KB/s here (printed as MB/s when >= 1000). */
	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}
	printf("\n");
}