/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER	4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
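/*
 * Illustrative sketch (not from this file): a peripheral or SIM does not
 * manipulate these async_node lists directly; it asks the xpt to do so by
 * sending an XPT_SASYNC_CB CCB, roughly as below.  The handler name is
 * hypothetical.
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = example_async_cb;	(hypothetical handler)
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 */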
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
#ifdef CAM_NEW_TRAN_CODE
	cam_proto	 protocol;
	u_int		 protocol_version;
	cam_xport	 transport;
	u_int		 transport_version;
#endif /* CAM_NEW_TRAN_CODE */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};
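/*
 * Illustrative sketch (not from this file): the EDT is a three-level tree,
 * so a full walk is a nested traversal of busses, targets, and devices.
 * The xpt*traverse() helpers declared further down wrap this pattern:
 *
 *	struct cam_eb *bus;
 *	struct cam_et *target;
 *	struct cam_ed *device;
 *
 *	TAILQ_FOREACH(bus, &xpt_busses, links)
 *		TAILQ_FOREACH(target, &bus->et_entries, links)
 *			TAILQ_FOREACH(device, &target->ed_entries, links)
 *				(examine device);
 */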
/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8

typedef enum {
	XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		{ T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Let's try the same as above, but for a drive that says
		 * it's an IPL-6860 but is actually an EXB 8200.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"IPL-6860*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * These Hitachi drives don't like multi-lun probing.
		 * The PR submitter has a DK319H, but says that the Linux
		 * kernel has a similar work-around for the DK312 and DK314,
		 * so all DK31* drives are quirked here.
		 * PR:            misc/18793
		 * Submitted by:  Paul Haddad <paul@pth.com>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* TeraSolutions special settings for TRC-22 RAID */
		{ T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
		/*quirks*/0, /*mintags*/55, /*maxtags*/255
	},
	{
		/*
		 * Would respond to all LUNs.  Device type and removable
		 * flag are jumper-selectable.
		 */
		{ T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
		  "Tahiti 1", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
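/*
 * Illustrative sketch (not from this file): the quirk lookup takes the
 * first entry whose inquiry pattern matches, so a new quirk belongs above
 * the catch-all default entry at the end of the table.  The vendor and
 * product strings below are hypothetical:
 *
 *	{
 *		(Broken tagged queuing drive)
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "ROCKET9000*", "*" },
 *		(quirks)0, (mintags)0, (maxtags)0
 *	},
 */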
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);
PERIPHDRIVER_DECLARE(probe, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif
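/*
 * Illustrative kernel config sketch (values are examples only) for
 * populating the debug state above at boot:
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=(CAM_DEBUG_INFO|CAM_DEBUG_CDB)
 */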
/* Pointers to software interrupt handlers */
void *camnet_ih;
void *cambio_ih;

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static void	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static void		xpt_dev_async(u_int32_t async_code,
				      struct cam_eb *bus,
				      struct cam_et *target,
				      struct cam_ed *device,
				      void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr(void *);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
#ifdef CAM_NEW_TRAN_CODE
static void	 xpt_devise_transport(struct cam_path *path);
#endif /* CAM_NEW_TRAN_CODE */
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
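/*
 * Illustrative sketch (not from this file) of the traversal interface
 * declared above: a tr_func callback returns nonzero to continue the walk
 * and zero to stop it early.  The function and counter names below are
 * hypothetical.
 *
 *	static int
 *	example_count_dev(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);		(keep traversing)
 *	}
 *
 *	int ndevs = 0;
 *	xpt_for_all_devices(example_count_dev, &ndevs);
 */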
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}
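/*
 * Illustrative userland sketch (not kernel code) of driving this device
 * node; error handling omitted:
 *
 *	int fd = open("/dev/xpt0", O_RDWR);
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.pinfo.priority = 1;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */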
static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.  XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;
			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, and the index into the
	 * device peripheral driver list, and the status are also passed
	 * back.  Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change since
	 * we look at the whole thing in one pass, and we do it with splcam
	 * protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		u_int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}
		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	if (what == MOD_LOAD) {
		xpt_init(NULL);
	} else if (what == MOD_UNLOAD) {
		return EBUSY;
	}

	return 0;
}

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	swi_add(NULL, "camnet", camisr, &cam_netq, SWI_CAMNET, 0, &camnet_ih);
	swi_add(NULL, "cambio", camisr, &cam_bioq, SWI_CAMBIO, 0, &cambio_ih);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		periph_head = &device->periphs;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}
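/*
 * Illustrative call sequence (not from this file): the two functions above
 * bracket a peripheral instance's life and are invoked from cam_periph.c,
 * not by peripheral drivers themselves:
 *
 *	status = xpt_add_periph(periph);	(instance created)
 *	...
 *	xpt_remove_periph(periph);		(instance destroyed)
 */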
#ifdef CAM_NEW_TRAN_CODE

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct	ccb_pathinq cpi;
	struct	ccb_trans_settings cts;
	struct	cam_path *path;
	u_int	speed;
	u_int	freq;
	u_int	mb;
	int	s;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if (bootverbose && path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);

	/* Ask the SIM for its base transfer speed */
	xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	speed = cpi.base_transfer_speed;
	freq = 0;
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
		  && spi->sync_offset != 0) {
			freq = scsi_calc_syncsrate(spi->sync_period);
			speed = freq;
		}

		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
			speed *= (0x01 << spi->bus_width);
	}

	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_SPEED) {
			speed = fc->bitrate;
		}
	}

	mb = speed / 1000;
	if (mb > 0)
		printf("%s%d: %d.%03dMB/s transfers",
		       periph->periph_name, periph->unit_number,
		       mb, speed % 1000);
	else
		printf("%s%d: %dKB/s transfers", periph->periph_name,
		       periph->unit_number, speed);
	/* Report additional information about SPI connections */
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
		struct	ccb_trans_settings_spi *spi;

		spi = &cts.xport_specific.spi;
		if (freq != 0) {
			printf(" (%d.%03dMHz%s, offset %d", freq / 1000,
			       freq % 1000,
			       (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
			     ? " DT" : "",
			       spi->sync_offset);
		}
		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
		 && spi->bus_width > 0) {
			if (freq != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << spi->bus_width));
		} else if (freq != 0) {
			printf(")");
		}
	}
	if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
		struct	ccb_trans_settings_fc *fc;

		fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWNN)
			printf(" WWNN 0x%llx", (long long) fc->wwnn);
		if (fc->valid & CTS_FC_VALID_WWPN)
			printf(" WWPN 0x%llx", (long long) fc->wwpn);
		if (fc->valid & CTS_FC_VALID_PORT)
			printf(" PortID 0x%x", fc->port);
	}

	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("\n%s%d: Tagged Queueing Enabled",
		       periph->periph_name, periph->unit_number);
	}
	printf("\n");
	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
#else /* CAM_NEW_TRAN_CODE */
void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}

#endif /* CAM_NEW_TRAN_CODE */

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}
	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;


			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
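/*
 * Illustrative sketch (not from this file) of how dev_match_ret values
 * compose: the low nibble holds flags (DM_RET_COPY), the high nibble one
 * action, so a single return can request both "copy this node out" and
 * "keep walking its children":
 *
 *	retval = DM_RET_DESCEND | DM_RET_COPY;
 *	if (retval & DM_RET_COPY)
 *		(copy the node out);
 *	if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
 *		(descend to children);
 */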
1937 */ 1938 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1939 retval |= DM_RET_STOP; 1940 1941 return(retval); 1942 } 1943 1944 /* 1945 * Match a single peripheral against any number of match patterns. 1946 */ 1947 static dev_match_ret 1948 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1949 struct cam_periph *periph) 1950 { 1951 dev_match_ret retval; 1952 int i; 1953 1954 /* 1955 * If we aren't given something to match against, that's an error. 1956 */ 1957 if (periph == NULL) 1958 return(DM_RET_ERROR); 1959 1960 /* 1961 * If there are no match entries, then this peripheral matches no 1962 * matter what. 1963 */ 1964 if ((patterns == NULL) || (num_patterns == 0)) 1965 return(DM_RET_STOP | DM_RET_COPY); 1966 1967 /* 1968 * There aren't any nodes below a peripheral node, so there's no 1969 * reason to descend the tree any further. 1970 */ 1971 retval = DM_RET_STOP; 1972 1973 for (i = 0; i < num_patterns; i++) { 1974 struct periph_match_pattern *cur_pattern; 1975 1976 /* 1977 * If the pattern in question isn't for a peripheral, we 1978 * aren't interested. 1979 */ 1980 if (patterns[i].type != DEV_MATCH_PERIPH) 1981 continue; 1982 1983 cur_pattern = &patterns[i].pattern.periph_pattern; 1984 1985 /* 1986 * If they want to match on anything, then we will do so. 1987 */ 1988 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 1989 /* set the copy flag */ 1990 retval |= DM_RET_COPY; 1991 1992 /* 1993 * We've already set the return action to stop, 1994 * since there are no nodes below peripherals in 1995 * the tree. 1996 */ 1997 return(retval); 1998 } 1999 2000 /* 2001 * Not sure why someone would do this... 2002 */ 2003 if (cur_pattern->flags == PERIPH_MATCH_NONE) 2004 continue; 2005 2006 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 2007 && (cur_pattern->path_id != periph->path->bus->path_id)) 2008 continue; 2009 2010 /* 2011 * For the target and lun id's, we have to make sure the 2012 * target and lun pointers aren't NULL. The xpt peripheral 2013 * has a wildcard target and device. 2014 */ 2015 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 2016 && ((periph->path->target == NULL) 2017 ||(cur_pattern->target_id != periph->path->target->target_id))) 2018 continue; 2019 2020 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 2021 && ((periph->path->device == NULL) 2022 || (cur_pattern->target_lun != periph->path->device->lun_id))) 2023 continue; 2024 2025 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 2026 && (cur_pattern->unit_number != periph->unit_number)) 2027 continue; 2028 2029 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 2030 && (strncmp(cur_pattern->periph_name, periph->periph_name, 2031 DEV_IDLEN) != 0)) 2032 continue; 2033 2034 /* 2035 * If we get to this point, the user definitely wants 2036 * information on this peripheral. So tell the caller to 2037 * copy the data out. 2038 */ 2039 retval |= DM_RET_COPY; 2040 2041 /* 2042 * The return action has already been set to stop, since 2043 * peripherals don't have any nodes below them in the EDT. 2044 */ 2045 return(retval); 2046 } 2047 2048 /* 2049 * If we get to this point, the peripheral that was passed in 2050 * doesn't match any of the patterns. 2051 */ 2052 return(retval); 2053 } 2054 2055 static int 2056 xptedtbusfunc(struct cam_eb *bus, void *arg) 2057 { 2058 struct ccb_dev_match *cdm; 2059 dev_match_ret retval; 2060 2061 cdm = (struct ccb_dev_match *)arg; 2062 2063 /* 2064 * If our position is for something deeper in the tree, that means 2065 * that we've already seen this node. 
So, we keep going down. 2066 */ 2067 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2068 && (cdm->pos.cookie.bus == bus) 2069 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2070 && (cdm->pos.cookie.target != NULL)) 2071 retval = DM_RET_DESCEND; 2072 else 2073 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 2074 2075 /* 2076 * If we got an error, bail out of the search. 2077 */ 2078 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2079 cdm->status = CAM_DEV_MATCH_ERROR; 2080 return(0); 2081 } 2082 2083 /* 2084 * If the copy flag is set, copy this bus out. 2085 */ 2086 if (retval & DM_RET_COPY) { 2087 int spaceleft, j; 2088 2089 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2090 sizeof(struct dev_match_result)); 2091 2092 /* 2093 * If we don't have enough space to put in another 2094 * match result, save our position and tell the 2095 * user there are more devices to check. 2096 */ 2097 if (spaceleft < sizeof(struct dev_match_result)) { 2098 bzero(&cdm->pos, sizeof(cdm->pos)); 2099 cdm->pos.position_type = 2100 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 2101 2102 cdm->pos.cookie.bus = bus; 2103 cdm->pos.generations[CAM_BUS_GENERATION]= 2104 bus_generation; 2105 cdm->status = CAM_DEV_MATCH_MORE; 2106 return(0); 2107 } 2108 j = cdm->num_matches; 2109 cdm->num_matches++; 2110 cdm->matches[j].type = DEV_MATCH_BUS; 2111 cdm->matches[j].result.bus_result.path_id = bus->path_id; 2112 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 2113 cdm->matches[j].result.bus_result.unit_number = 2114 bus->sim->unit_number; 2115 strncpy(cdm->matches[j].result.bus_result.dev_name, 2116 bus->sim->sim_name, DEV_IDLEN); 2117 } 2118 2119 /* 2120 * If the user is only interested in busses, there's no 2121 * reason to descend to the next level in the tree. 2122 */ 2123 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2124 return(1); 2125 2126 /* 2127 * If there is a target generation recorded, check it to 2128 * make sure the target list hasn't changed. 2129 */ 2130 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2131 && (bus == cdm->pos.cookie.bus) 2132 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2133 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) 2134 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 2135 bus->generation)) { 2136 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2137 return(0); 2138 } 2139 2140 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2141 && (cdm->pos.cookie.bus == bus) 2142 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2143 && (cdm->pos.cookie.target != NULL)) 2144 return(xpttargettraverse(bus, 2145 (struct cam_et *)cdm->pos.cookie.target, 2146 xptedttargetfunc, arg)); 2147 else 2148 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); 2149 } 2150 2151 static int 2152 xptedttargetfunc(struct cam_et *target, void *arg) 2153 { 2154 struct ccb_dev_match *cdm; 2155 2156 cdm = (struct ccb_dev_match *)arg; 2157 2158 /* 2159 * If there is a device list generation recorded, check it to 2160 * make sure the device list hasn't changed. 
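 *
 * The generation numbers are what make a saved position trustworthy:
 * whenever a list in the EDT changes, its parent's generation is
 * bumped, so a stale position from an earlier XPT_DEV_MATCH pass can
 * be detected here rather than silently walking freed or relocated
 * entries. A consumer that gets CAM_DEV_MATCH_LIST_CHANGED back is
 * expected to reset and rescan, e.g. (a sketch, not from this file):
 *
 *	if (ccb.cdm.status == CAM_DEV_MATCH_LIST_CHANGED) {
 *		bzero(&ccb.cdm.pos, sizeof(ccb.cdm.pos));
 *		ccb.cdm.num_matches = 0;
 *		goto restart_scan;
 *	}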
2161 */ 2162 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2163 && (cdm->pos.cookie.bus == target->bus) 2164 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2165 && (cdm->pos.cookie.target == target) 2166 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2167 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 2168 && (cdm->pos.generations[CAM_DEV_GENERATION] != 2169 target->generation)) { 2170 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2171 return(0); 2172 } 2173 2174 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2175 && (cdm->pos.cookie.bus == target->bus) 2176 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2177 && (cdm->pos.cookie.target == target) 2178 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2179 && (cdm->pos.cookie.device != NULL)) 2180 return(xptdevicetraverse(target, 2181 (struct cam_ed *)cdm->pos.cookie.device, 2182 xptedtdevicefunc, arg)); 2183 else 2184 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 2185 } 2186 2187 static int 2188 xptedtdevicefunc(struct cam_ed *device, void *arg) 2189 { 2190 2191 struct ccb_dev_match *cdm; 2192 dev_match_ret retval; 2193 2194 cdm = (struct ccb_dev_match *)arg; 2195 2196 /* 2197 * If our position is for something deeper in the tree, that means 2198 * that we've already seen this node. So, we keep going down. 2199 */ 2200 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2201 && (cdm->pos.cookie.device == device) 2202 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2203 && (cdm->pos.cookie.periph != NULL)) 2204 retval = DM_RET_DESCEND; 2205 else 2206 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 2207 device); 2208 2209 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2210 cdm->status = CAM_DEV_MATCH_ERROR; 2211 return(0); 2212 } 2213 2214 /* 2215 * If the copy flag is set, copy this device out. 2216 */ 2217 if (retval & DM_RET_COPY) { 2218 int spaceleft, j; 2219 2220 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2221 sizeof(struct dev_match_result)); 2222 2223 /* 2224 * If we don't have enough space to put in another 2225 * match result, save our position and tell the 2226 * user there are more devices to check. 
2227 */ 2228 if (spaceleft < sizeof(struct dev_match_result)) { 2229 bzero(&cdm->pos, sizeof(cdm->pos)); 2230 cdm->pos.position_type = 2231 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2232 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2233 2234 cdm->pos.cookie.bus = device->target->bus; 2235 cdm->pos.generations[CAM_BUS_GENERATION]= 2236 bus_generation; 2237 cdm->pos.cookie.target = device->target; 2238 cdm->pos.generations[CAM_TARGET_GENERATION] = 2239 device->target->bus->generation; 2240 cdm->pos.cookie.device = device; 2241 cdm->pos.generations[CAM_DEV_GENERATION] = 2242 device->target->generation; 2243 cdm->status = CAM_DEV_MATCH_MORE; 2244 return(0); 2245 } 2246 j = cdm->num_matches; 2247 cdm->num_matches++; 2248 cdm->matches[j].type = DEV_MATCH_DEVICE; 2249 cdm->matches[j].result.device_result.path_id = 2250 device->target->bus->path_id; 2251 cdm->matches[j].result.device_result.target_id = 2252 device->target->target_id; 2253 cdm->matches[j].result.device_result.target_lun = 2254 device->lun_id; 2255 bcopy(&device->inq_data, 2256 &cdm->matches[j].result.device_result.inq_data, 2257 sizeof(struct scsi_inquiry_data)); 2258 2259 /* Let the user know whether this device is unconfigured */ 2260 if (device->flags & CAM_DEV_UNCONFIGURED) 2261 cdm->matches[j].result.device_result.flags = 2262 DEV_RESULT_UNCONFIGURED; 2263 else 2264 cdm->matches[j].result.device_result.flags = 2265 DEV_RESULT_NOFLAG; 2266 } 2267 2268 /* 2269 * If the user isn't interested in peripherals, don't descend 2270 * the tree any further. 2271 */ 2272 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2273 return(1); 2274 2275 /* 2276 * If there is a peripheral list generation recorded, make sure 2277 * it hasn't changed. 2278 */ 2279 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2280 && (device->target->bus == cdm->pos.cookie.bus) 2281 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2282 && (device->target == cdm->pos.cookie.target) 2283 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2284 && (device == cdm->pos.cookie.device) 2285 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2286 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2287 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2288 device->generation)){ 2289 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2290 return(0); 2291 } 2292 2293 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2294 && (cdm->pos.cookie.bus == device->target->bus) 2295 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2296 && (cdm->pos.cookie.target == device->target) 2297 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2298 && (cdm->pos.cookie.device == device) 2299 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2300 && (cdm->pos.cookie.periph != NULL)) 2301 return(xptperiphtraverse(device, 2302 (struct cam_periph *)cdm->pos.cookie.periph, 2303 xptedtperiphfunc, arg)); 2304 else 2305 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2306 } 2307 2308 static int 2309 xptedtperiphfunc(struct cam_periph *periph, void *arg) 2310 { 2311 struct ccb_dev_match *cdm; 2312 dev_match_ret retval; 2313 2314 cdm = (struct ccb_dev_match *)arg; 2315 2316 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2317 2318 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2319 cdm->status = CAM_DEV_MATCH_ERROR; 2320 return(0); 2321 } 2322 2323 /* 2324 * If the copy flag is set, copy this peripheral out. 
2325 */ 2326 if (retval & DM_RET_COPY) { 2327 int spaceleft, j; 2328 2329 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2330 sizeof(struct dev_match_result)); 2331 2332 /* 2333 * If we don't have enough space to put in another 2334 * match result, save our position and tell the 2335 * user there are more devices to check. 2336 */ 2337 if (spaceleft < sizeof(struct dev_match_result)) { 2338 bzero(&cdm->pos, sizeof(cdm->pos)); 2339 cdm->pos.position_type = 2340 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2341 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2342 CAM_DEV_POS_PERIPH; 2343 2344 cdm->pos.cookie.bus = periph->path->bus; 2345 cdm->pos.generations[CAM_BUS_GENERATION]= 2346 bus_generation; 2347 cdm->pos.cookie.target = periph->path->target; 2348 cdm->pos.generations[CAM_TARGET_GENERATION] = 2349 periph->path->bus->generation; 2350 cdm->pos.cookie.device = periph->path->device; 2351 cdm->pos.generations[CAM_DEV_GENERATION] = 2352 periph->path->target->generation; 2353 cdm->pos.cookie.periph = periph; 2354 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2355 periph->path->device->generation; 2356 cdm->status = CAM_DEV_MATCH_MORE; 2357 return(0); 2358 } 2359 2360 j = cdm->num_matches; 2361 cdm->num_matches++; 2362 cdm->matches[j].type = DEV_MATCH_PERIPH; 2363 cdm->matches[j].result.periph_result.path_id = 2364 periph->path->bus->path_id; 2365 cdm->matches[j].result.periph_result.target_id = 2366 periph->path->target->target_id; 2367 cdm->matches[j].result.periph_result.target_lun = 2368 periph->path->device->lun_id; 2369 cdm->matches[j].result.periph_result.unit_number = 2370 periph->unit_number; 2371 strncpy(cdm->matches[j].result.periph_result.periph_name, 2372 periph->periph_name, DEV_IDLEN); 2373 } 2374 2375 return(1); 2376 } 2377 2378 static int 2379 xptedtmatch(struct ccb_dev_match *cdm) 2380 { 2381 int ret; 2382 2383 cdm->num_matches = 0; 2384 2385 /* 2386 * Check the bus list generation. If it has changed, the user 2387 * needs to reset everything and start over. 2388 */ 2389 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2390 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2391 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2392 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2393 return(0); 2394 } 2395 2396 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2397 && (cdm->pos.cookie.bus != NULL)) 2398 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2399 xptedtbusfunc, cdm); 2400 else 2401 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2402 2403 /* 2404 * If we get back 0, that means that we had to stop before fully 2405 * traversing the EDT. It also means that one of the subroutines 2406 * has set the status field to the proper value. If we get back 1, 2407 * we've fully traversed the EDT and copied out any matching entries. 
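 *
 * The usual caller of this interface is a userland program talking
 * to the transport layer through the xpt device node, looping while
 * the kernel reports a partial traversal. A minimal sketch, with
 * error handling and buffer sizing illustrative only:
 *
 *	union ccb ccb;
 *	struct dev_match_result matches[64];
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.path_id = CAM_XPT_PATH_ID;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
 *	ccb.cdm.match_buf_len = sizeof(matches);
 *	ccb.cdm.matches = matches;
 *	ccb.cdm.num_matches = 0;
 *	ccb.cdm.num_patterns = 0;
 *	ccb.cdm.pattern_buf_len = 0;
 *	do {
 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *			err(1, "CAMIOCOMMAND");
 *		(consume ccb.cdm.num_matches results here)
 *	} while (ccb.ccb_h.status == CAM_REQ_CMP
 *	      && ccb.cdm.status == CAM_DEV_MATCH_MORE);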
2408 */
2409 if (ret == 1)
2410 cdm->status = CAM_DEV_MATCH_LAST;
2411
2412 return(ret);
2413 }
2414
2415 static int
2416 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2417 {
2418 struct ccb_dev_match *cdm;
2419
2420 cdm = (struct ccb_dev_match *)arg;
2421
2422 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2423 && (cdm->pos.cookie.pdrv == pdrv)
2424 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2425 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2426 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2427 (*pdrv)->generation)) {
2428 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2429 return(0);
2430 }
2431
2432 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2433 && (cdm->pos.cookie.pdrv == pdrv)
2434 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2435 && (cdm->pos.cookie.periph != NULL))
2436 return(xptpdperiphtraverse(pdrv,
2437 (struct cam_periph *)cdm->pos.cookie.periph,
2438 xptplistperiphfunc, arg));
2439 else
2440 return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2441 }
2442
2443 static int
2444 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2445 {
2446 struct ccb_dev_match *cdm;
2447 dev_match_ret retval;
2448
2449 cdm = (struct ccb_dev_match *)arg;
2450
2451 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2452
2453 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2454 cdm->status = CAM_DEV_MATCH_ERROR;
2455 return(0);
2456 }
2457
2458 /*
2459 * If the copy flag is set, copy this peripheral out.
2460 */
2461 if (retval & DM_RET_COPY) {
2462 int spaceleft, j;
2463
2464 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2465 sizeof(struct dev_match_result));
2466
2467 /*
2468 * If we don't have enough space to put in another
2469 * match result, save our position and tell the
2470 * user there are more devices to check.
2471 */
2472 if (spaceleft < sizeof(struct dev_match_result)) {
2473 struct periph_driver **pdrv;
2474
2475 pdrv = NULL;
2476 bzero(&cdm->pos, sizeof(cdm->pos));
2477 cdm->pos.position_type =
2478 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2479 CAM_DEV_POS_PERIPH;
2480
2481 /*
2482 * This may look a bit nonsensical, but it is
2483 * actually quite logical. There are very few
2484 * peripheral drivers, and bloating every peripheral
2485 * structure with a pointer back to its parent
2486 * peripheral driver linker set entry would cost
2487 * more in the long run than doing this quick lookup.
2488 */
2489 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2490 if (strcmp((*pdrv)->driver_name,
2491 periph->periph_name) == 0)
2492 break;
2493 }
2494
2495 if (*pdrv == NULL) {
2496 cdm->status = CAM_DEV_MATCH_ERROR;
2497 return(0);
2498 }
2499
2500 cdm->pos.cookie.pdrv = pdrv;
2501 /*
2502 * The periph generation slot does double duty, as
2503 * does the periph pointer slot. They are used for
2504 * both edt and pdrv lookups and positioning.
2505 */
2506 cdm->pos.cookie.periph = periph;
2507 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2508 (*pdrv)->generation;
2509 cdm->status = CAM_DEV_MATCH_MORE;
2510 return(0);
2511 }
2512
2513 j = cdm->num_matches;
2514 cdm->num_matches++;
2515 cdm->matches[j].type = DEV_MATCH_PERIPH;
2516 cdm->matches[j].result.periph_result.path_id =
2517 periph->path->bus->path_id;
2518
2519 /*
2520 * The transport layer peripheral doesn't have a target or
2521 * lun.
2522 */
2523 if (periph->path->target)
2524 cdm->matches[j].result.periph_result.target_id =
2525 periph->path->target->target_id;
2526 else
2527 cdm->matches[j].result.periph_result.target_id = -1;
2528
2529 if (periph->path->device)
2530 cdm->matches[j].result.periph_result.target_lun =
2531 periph->path->device->lun_id;
2532 else
2533 cdm->matches[j].result.periph_result.target_lun = -1;
2534
2535 cdm->matches[j].result.periph_result.unit_number =
2536 periph->unit_number;
2537 strncpy(cdm->matches[j].result.periph_result.periph_name,
2538 periph->periph_name, DEV_IDLEN);
2539 }
2540
2541 return(1);
2542 }
2543
2544 static int
2545 xptperiphlistmatch(struct ccb_dev_match *cdm)
2546 {
2547 int ret;
2548
2549 cdm->num_matches = 0;
2550
2551 /*
2552 * At the corresponding point in the EDT traversal function, we check
2553 * the bus list generation to make sure that no busses have been added
2554 * or removed since the user last sent an XPT_DEV_MATCH ccb through.
2555 * For the peripheral driver list traversal function, however, we
2556 * don't have to worry about new peripheral driver types coming or
2557 * going; they're in a linker set, and therefore can't change
2558 * without a recompile.
2559 */
2560
2561 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2562 && (cdm->pos.cookie.pdrv != NULL))
2563 ret = xptpdrvtraverse(
2564 (struct periph_driver **)cdm->pos.cookie.pdrv,
2565 xptplistpdrvfunc, cdm);
2566 else
2567 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2568
2569 /*
2570 * If we get back 0, that means that we had to stop before fully
2571 * traversing the peripheral driver tree. It also means that one of
2572 * the subroutines has set the status field to the proper value. If
2573 * we get back 1, we've fully traversed the peripheral driver list
2574 * and copied out any matching entries.
2575 */
2576 if (ret == 1)
2577 cdm->status = CAM_DEV_MATCH_LAST;
2578
2579 return(ret);
2580 }
2581
2582 static int
2583 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2584 {
2585 struct cam_eb *bus, *next_bus;
2586 int retval;
2587
2588 retval = 1;
2589
2590 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses));
2591 bus != NULL;
2592 bus = next_bus) {
2593 next_bus = TAILQ_NEXT(bus, links);
2594
2595 retval = tr_func(bus, arg);
2596 if (retval == 0)
2597 return(retval);
2598 }
2599
2600 return(retval);
2601 }
2602
2603 static int
2604 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2605 xpt_targetfunc_t *tr_func, void *arg)
2606 {
2607 struct cam_et *target, *next_target;
2608 int retval;
2609
2610 retval = 1;
2611 for (target = (start_target ? start_target :
2612 TAILQ_FIRST(&bus->et_entries));
2613 target != NULL; target = next_target) {
2614
2615 next_target = TAILQ_NEXT(target, links);
2616
2617 retval = tr_func(target, arg);
2618
2619 if (retval == 0)
2620 return(retval);
2621 }
2622
2623 return(retval);
2624 }
2625
2626 static int
2627 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2628 xpt_devicefunc_t *tr_func, void *arg)
2629 {
2630 struct cam_ed *device, *next_device;
2631 int retval;
2632
2633 retval = 1;
2634 for (device = (start_device ?
start_device : 2635 TAILQ_FIRST(&target->ed_entries)); 2636 device != NULL; 2637 device = next_device) { 2638 2639 next_device = TAILQ_NEXT(device, links); 2640 2641 retval = tr_func(device, arg); 2642 2643 if (retval == 0) 2644 return(retval); 2645 } 2646 2647 return(retval); 2648 } 2649 2650 static int 2651 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2652 xpt_periphfunc_t *tr_func, void *arg) 2653 { 2654 struct cam_periph *periph, *next_periph; 2655 int retval; 2656 2657 retval = 1; 2658 2659 for (periph = (start_periph ? start_periph : 2660 SLIST_FIRST(&device->periphs)); 2661 periph != NULL; 2662 periph = next_periph) { 2663 2664 next_periph = SLIST_NEXT(periph, periph_links); 2665 2666 retval = tr_func(periph, arg); 2667 if (retval == 0) 2668 return(retval); 2669 } 2670 2671 return(retval); 2672 } 2673 2674 static int 2675 xptpdrvtraverse(struct periph_driver **start_pdrv, 2676 xpt_pdrvfunc_t *tr_func, void *arg) 2677 { 2678 struct periph_driver **pdrv; 2679 int retval; 2680 2681 retval = 1; 2682 2683 /* 2684 * We don't traverse the peripheral driver list like we do the 2685 * other lists, because it is a linker set, and therefore cannot be 2686 * changed during runtime. If the peripheral driver list is ever 2687 * re-done to be something other than a linker set (i.e. it can 2688 * change while the system is running), the list traversal should 2689 * be modified to work like the other traversal functions. 2690 */ 2691 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2692 *pdrv != NULL; pdrv++) { 2693 retval = tr_func(pdrv, arg); 2694 2695 if (retval == 0) 2696 return(retval); 2697 } 2698 2699 return(retval); 2700 } 2701 2702 static int 2703 xptpdperiphtraverse(struct periph_driver **pdrv, 2704 struct cam_periph *start_periph, 2705 xpt_periphfunc_t *tr_func, void *arg) 2706 { 2707 struct cam_periph *periph, *next_periph; 2708 int retval; 2709 2710 retval = 1; 2711 2712 for (periph = (start_periph ? 
start_periph : 2713 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2714 periph = next_periph) { 2715 2716 next_periph = TAILQ_NEXT(periph, unit_links); 2717 2718 retval = tr_func(periph, arg); 2719 if (retval == 0) 2720 return(retval); 2721 } 2722 return(retval); 2723 } 2724 2725 static int 2726 xptdefbusfunc(struct cam_eb *bus, void *arg) 2727 { 2728 struct xpt_traverse_config *tr_config; 2729 2730 tr_config = (struct xpt_traverse_config *)arg; 2731 2732 if (tr_config->depth == XPT_DEPTH_BUS) { 2733 xpt_busfunc_t *tr_func; 2734 2735 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2736 2737 return(tr_func(bus, tr_config->tr_arg)); 2738 } else 2739 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2740 } 2741 2742 static int 2743 xptdeftargetfunc(struct cam_et *target, void *arg) 2744 { 2745 struct xpt_traverse_config *tr_config; 2746 2747 tr_config = (struct xpt_traverse_config *)arg; 2748 2749 if (tr_config->depth == XPT_DEPTH_TARGET) { 2750 xpt_targetfunc_t *tr_func; 2751 2752 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2753 2754 return(tr_func(target, tr_config->tr_arg)); 2755 } else 2756 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2757 } 2758 2759 static int 2760 xptdefdevicefunc(struct cam_ed *device, void *arg) 2761 { 2762 struct xpt_traverse_config *tr_config; 2763 2764 tr_config = (struct xpt_traverse_config *)arg; 2765 2766 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2767 xpt_devicefunc_t *tr_func; 2768 2769 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2770 2771 return(tr_func(device, tr_config->tr_arg)); 2772 } else 2773 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2774 } 2775 2776 static int 2777 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2778 { 2779 struct xpt_traverse_config *tr_config; 2780 xpt_periphfunc_t *tr_func; 2781 2782 tr_config = (struct xpt_traverse_config *)arg; 2783 2784 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2785 2786 /* 2787 * Unlike the other default functions, we don't check for depth 2788 * here. The peripheral driver level is the last level in the EDT, 2789 * so if we're here, we should execute the function in question. 2790 */ 2791 return(tr_func(periph, tr_config->tr_arg)); 2792 } 2793 2794 /* 2795 * Execute the given function for every bus in the EDT. 2796 */ 2797 static int 2798 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2799 { 2800 struct xpt_traverse_config tr_config; 2801 2802 tr_config.depth = XPT_DEPTH_BUS; 2803 tr_config.tr_func = tr_func; 2804 tr_config.tr_arg = arg; 2805 2806 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2807 } 2808 2809 #ifdef notusedyet 2810 /* 2811 * Execute the given function for every target in the EDT. 2812 */ 2813 static int 2814 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2815 { 2816 struct xpt_traverse_config tr_config; 2817 2818 tr_config.depth = XPT_DEPTH_TARGET; 2819 tr_config.tr_func = tr_func; 2820 tr_config.tr_arg = arg; 2821 2822 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2823 } 2824 #endif /* notusedyet */ 2825 2826 /* 2827 * Execute the given function for every device in the EDT. 
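 *
 * A traversal callback returns nonzero to keep the walk going and
 * zero to abort it, so a hypothetical device counter (a sketch, not
 * code from this file) would look like:
 *
 *	static int
 *	xptcountdevsfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);
 *	}
 *
 *	int count = 0;
 *	xpt_for_all_devices(xptcountdevsfunc, &count);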
2828 */
2829 static int
2830 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2831 {
2832 struct xpt_traverse_config tr_config;
2833
2834 tr_config.depth = XPT_DEPTH_DEVICE;
2835 tr_config.tr_func = tr_func;
2836 tr_config.tr_arg = arg;
2837
2838 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2839 }
2840
2841 #ifdef notusedyet
2842 /*
2843 * Execute the given function for every peripheral in the EDT.
2844 */
2845 static int
2846 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg)
2847 {
2848 struct xpt_traverse_config tr_config;
2849
2850 tr_config.depth = XPT_DEPTH_PERIPH;
2851 tr_config.tr_func = tr_func;
2852 tr_config.tr_arg = arg;
2853
2854 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2855 }
2856 #endif /* notusedyet */
2857
2858 static int
2859 xptsetasyncfunc(struct cam_ed *device, void *arg)
2860 {
2861 struct cam_path path;
2862 struct ccb_getdev cgd;
2863 struct async_node *cur_entry;
2864
2865 cur_entry = (struct async_node *)arg;
2866
2867 /*
2868 * Don't report unconfigured devices (Wildcard devs,
2869 * devices only for target mode, device instances
2870 * that have been invalidated but are waiting for
2871 * their last reference count to be released).
2872 */
2873 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2874 return (1);
2875
2876 xpt_compile_path(&path,
2877 NULL,
2878 device->target->bus->path_id,
2879 device->target->target_id,
2880 device->lun_id);
2881 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2882 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2883 xpt_action((union ccb *)&cgd);
2884 cur_entry->callback(cur_entry->callback_arg,
2885 AC_FOUND_DEVICE,
2886 &path, &cgd);
2887 xpt_release_path(&path);
2888
2889 return(1);
2890 }
2891
2892 static int
2893 xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2894 {
2895 struct cam_path path;
2896 struct ccb_pathinq cpi;
2897 struct async_node *cur_entry;
2898
2899 cur_entry = (struct async_node *)arg;
2900
2901 xpt_compile_path(&path, /*periph*/NULL,
2902 bus->sim->path_id,
2903 CAM_TARGET_WILDCARD,
2904 CAM_LUN_WILDCARD);
2905 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2906 cpi.ccb_h.func_code = XPT_PATH_INQ;
2907 xpt_action((union ccb *)&cpi);
2908 cur_entry->callback(cur_entry->callback_arg,
2909 AC_PATH_REGISTERED,
2910 &path, &cpi);
2911 xpt_release_path(&path);
2912
2913 return(1);
2914 }
2915
2916 void
2917 xpt_action(union ccb *start_ccb)
2918 {
2919 int iopl;
2920
2921 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2922
2923 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2924
2925 iopl = splsoftcam();
2926 switch (start_ccb->ccb_h.func_code) {
2927 case XPT_SCSI_IO:
2928 {
2929 #ifdef CAM_NEW_TRAN_CODE
2930 struct cam_ed *device;
2931 #endif /* CAM_NEW_TRAN_CODE */
2932 #ifdef CAMDEBUG
2933 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2934 struct cam_path *path;
2935
2936 path = start_ccb->ccb_h.path;
2937 #endif
2938
2939 /*
2940 * For the sake of compatibility with SCSI-1
2941 * devices that may not understand the identify
2942 * message, we include lun information in the
2943 * second byte of all commands. SCSI-1 specifies
2944 * that luns are a 3 bit value and reserves only 3
2945 * bits for lun information in the CDB. Later
2946 * revisions of the SCSI spec allow for more than 8
2947 * luns, but have deprecated lun information in the
2948 * CDB. So, if the lun won't fit, we must omit it.
2949 *
2950 * Also be aware that during initial probing for devices,
2951 * the inquiry information is unknown but initialized to 0.
2952 * This means that this code will be exercised while probing
2953 * devices with an ANSI revision greater than 2.
2954 */
2955 #ifdef CAM_NEW_TRAN_CODE
2956 device = start_ccb->ccb_h.path->device;
2957 if (device->protocol_version <= SCSI_REV_2
2958 #else /* CAM_NEW_TRAN_CODE */
2959 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2960 #endif /* CAM_NEW_TRAN_CODE */
2961 && start_ccb->ccb_h.target_lun < 8
2962 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2963
2964 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2965 start_ccb->ccb_h.target_lun << 5;
2966 }
2967 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2968 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2969 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2970 &path->device->inq_data),
2971 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2972 cdb_str, sizeof(cdb_str))));
2973 /* FALLTHROUGH */
2974 }
2975 case XPT_TARGET_IO:
2976 case XPT_CONT_TARGET_IO:
2977 start_ccb->csio.sense_resid = 0;
2978 start_ccb->csio.resid = 0;
2979 /* FALLTHROUGH */
2980 case XPT_RESET_DEV:
2981 case XPT_ENG_EXEC:
2982 {
2983 struct cam_path *path;
2984 int s;
2985 int runq;
2986
2987 path = start_ccb->ccb_h.path;
2988 s = splsoftcam();
2989
2990 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2991 if (path->device->qfrozen_cnt == 0)
2992 runq = xpt_schedule_dev_sendq(path->bus, path->device);
2993 else
2994 runq = 0;
2995 splx(s);
2996 if (runq != 0)
2997 xpt_run_dev_sendq(path->bus);
2998 break;
2999 }
3000 case XPT_SET_TRAN_SETTINGS:
3001 {
3002 xpt_set_transfer_settings(&start_ccb->cts,
3003 start_ccb->ccb_h.path->device,
3004 /*async_update*/FALSE);
3005 break;
3006 }
3007 case XPT_CALC_GEOMETRY:
3008 {
3009 struct cam_sim *sim;
3010
3011 /* Filter out garbage */
3012 if (start_ccb->ccg.block_size == 0
3013 || start_ccb->ccg.volume_size == 0) {
3014 start_ccb->ccg.cylinders = 0;
3015 start_ccb->ccg.heads = 0;
3016 start_ccb->ccg.secs_per_track = 0;
3017 start_ccb->ccb_h.status = CAM_REQ_CMP;
3018 break;
3019 }
3020 #ifdef PC98
3021 /*
3022 * In a PC-98 system, geometry translation depends on
3023 * the "real" device geometry obtained from mode page 4.
3024 * SCSI geometry translation is performed in the
3025 * initialization routine of the SCSI BIOS and the result
3026 * is stored in host memory. If the translation is available
3027 * in host memory, use it. If not, rely on the default
3028 * translation the device driver performs.
3029 */
3030 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
3031 start_ccb->ccb_h.status = CAM_REQ_CMP;
3032 break;
3033 }
3034 #endif
3035 sim = start_ccb->ccb_h.path->bus->sim;
3036 (*(sim->sim_action))(sim, start_ccb);
3037 break;
3038 }
3039 case XPT_ABORT:
3040 {
3041 union ccb* abort_ccb;
3042 int s;
3043
3044 abort_ccb = start_ccb->cab.abort_ccb;
3045 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3046
3047 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3048 struct cam_ccbq *ccbq;
3049
3050 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3051 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3052 abort_ccb->ccb_h.status =
3053 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3054 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3055 s = splcam();
3056 xpt_done(abort_ccb);
3057 splx(s);
3058 start_ccb->ccb_h.status = CAM_REQ_CMP;
3059 break;
3060 }
3061 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3062 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3063 /*
3064 * We've caught this ccb en route to
3065 * the SIM. Flag it for abort and the
3066 * SIM will do so just before starting
3067 * real work on the CCB.
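 *
 * For reference, the abort request handled by this case is built by
 * the caller roughly as follows (a sketch; "victim" stands in for
 * the CCB to be aborted):
 *
 *	union ccb abort_req;
 *
 *	xpt_setup_ccb(&abort_req.ccb_h, victim->ccb_h.path, 1);
 *	abort_req.ccb_h.func_code = XPT_ABORT;
 *	abort_req.cab.abort_ccb = victim;
 *	xpt_action(&abort_req);
 *
 * On return, abort_req.ccb_h.status indicates how far the abort got.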
3068 */ 3069 abort_ccb->ccb_h.status = 3070 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 3071 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 3072 start_ccb->ccb_h.status = CAM_REQ_CMP; 3073 break; 3074 } 3075 } 3076 if (XPT_FC_IS_QUEUED(abort_ccb) 3077 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 3078 /* 3079 * It's already completed but waiting 3080 * for our SWI to get to it. 3081 */ 3082 start_ccb->ccb_h.status = CAM_UA_ABORT; 3083 break; 3084 } 3085 /* 3086 * If we weren't able to take care of the abort request 3087 * in the XPT, pass the request down to the SIM for processing. 3088 */ 3089 /* FALLTHROUGH */ 3090 } 3091 case XPT_ACCEPT_TARGET_IO: 3092 case XPT_EN_LUN: 3093 case XPT_IMMED_NOTIFY: 3094 case XPT_NOTIFY_ACK: 3095 case XPT_GET_TRAN_SETTINGS: 3096 case XPT_RESET_BUS: 3097 { 3098 struct cam_sim *sim; 3099 3100 sim = start_ccb->ccb_h.path->bus->sim; 3101 (*(sim->sim_action))(sim, start_ccb); 3102 break; 3103 } 3104 case XPT_PATH_INQ: 3105 { 3106 struct cam_sim *sim; 3107 3108 sim = start_ccb->ccb_h.path->bus->sim; 3109 (*(sim->sim_action))(sim, start_ccb); 3110 break; 3111 } 3112 case XPT_PATH_STATS: 3113 start_ccb->cpis.last_reset = 3114 start_ccb->ccb_h.path->bus->last_reset; 3115 start_ccb->ccb_h.status = CAM_REQ_CMP; 3116 break; 3117 case XPT_GDEV_TYPE: 3118 { 3119 struct cam_ed *dev; 3120 int s; 3121 3122 dev = start_ccb->ccb_h.path->device; 3123 s = splcam(); 3124 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3125 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3126 } else { 3127 struct ccb_getdev *cgd; 3128 struct cam_eb *bus; 3129 struct cam_et *tar; 3130 3131 cgd = &start_ccb->cgd; 3132 bus = cgd->ccb_h.path->bus; 3133 tar = cgd->ccb_h.path->target; 3134 cgd->inq_data = dev->inq_data; 3135 cgd->ccb_h.status = CAM_REQ_CMP; 3136 cgd->serial_num_len = dev->serial_num_len; 3137 if ((dev->serial_num_len > 0) 3138 && (dev->serial_num != NULL)) 3139 bcopy(dev->serial_num, cgd->serial_num, 3140 dev->serial_num_len); 3141 } 3142 splx(s); 3143 break; 3144 } 3145 case XPT_GDEV_STATS: 3146 { 3147 struct cam_ed *dev; 3148 int s; 3149 3150 dev = start_ccb->ccb_h.path->device; 3151 s = splcam(); 3152 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 3153 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 3154 } else { 3155 struct ccb_getdevstats *cgds; 3156 struct cam_eb *bus; 3157 struct cam_et *tar; 3158 3159 cgds = &start_ccb->cgds; 3160 bus = cgds->ccb_h.path->bus; 3161 tar = cgds->ccb_h.path->target; 3162 cgds->dev_openings = dev->ccbq.dev_openings; 3163 cgds->dev_active = dev->ccbq.dev_active; 3164 cgds->devq_openings = dev->ccbq.devq_openings; 3165 cgds->devq_queued = dev->ccbq.queue.entries; 3166 cgds->held = dev->ccbq.held; 3167 cgds->last_reset = tar->last_reset; 3168 cgds->maxtags = dev->quirk->maxtags; 3169 cgds->mintags = dev->quirk->mintags; 3170 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 3171 cgds->last_reset = bus->last_reset; 3172 cgds->ccb_h.status = CAM_REQ_CMP; 3173 } 3174 splx(s); 3175 break; 3176 } 3177 case XPT_GDEVLIST: 3178 { 3179 struct cam_periph *nperiph; 3180 struct periph_list *periph_head; 3181 struct ccb_getdevlist *cgdl; 3182 u_int i; 3183 int s; 3184 struct cam_ed *device; 3185 int found; 3186 3187 3188 found = 0; 3189 3190 /* 3191 * Don't want anyone mucking with our data. 3192 */ 3193 s = splcam(); 3194 device = start_ccb->ccb_h.path->device; 3195 periph_head = &device->periphs; 3196 cgdl = &start_ccb->cgdl; 3197 3198 /* 3199 * Check and see if the list has changed since the user 3200 * last requested a list member. 
If so, tell them that the 3201 * list has changed, and therefore they need to start over 3202 * from the beginning. 3203 */ 3204 if ((cgdl->index != 0) && 3205 (cgdl->generation != device->generation)) { 3206 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 3207 splx(s); 3208 break; 3209 } 3210 3211 /* 3212 * Traverse the list of peripherals and attempt to find 3213 * the requested peripheral. 3214 */ 3215 for (nperiph = SLIST_FIRST(periph_head), i = 0; 3216 (nperiph != NULL) && (i <= cgdl->index); 3217 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 3218 if (i == cgdl->index) { 3219 strncpy(cgdl->periph_name, 3220 nperiph->periph_name, 3221 DEV_IDLEN); 3222 cgdl->unit_number = nperiph->unit_number; 3223 found = 1; 3224 } 3225 } 3226 if (found == 0) { 3227 cgdl->status = CAM_GDEVLIST_ERROR; 3228 splx(s); 3229 break; 3230 } 3231 3232 if (nperiph == NULL) 3233 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3234 else 3235 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3236 3237 cgdl->index++; 3238 cgdl->generation = device->generation; 3239 3240 splx(s); 3241 cgdl->ccb_h.status = CAM_REQ_CMP; 3242 break; 3243 } 3244 case XPT_DEV_MATCH: 3245 { 3246 int s; 3247 dev_pos_type position_type; 3248 struct ccb_dev_match *cdm; 3249 int ret; 3250 3251 cdm = &start_ccb->cdm; 3252 3253 /* 3254 * Prevent EDT changes while we traverse it. 3255 */ 3256 s = splcam(); 3257 /* 3258 * There are two ways of getting at information in the EDT. 3259 * The first way is via the primary EDT tree. It starts 3260 * with a list of busses, then a list of targets on a bus, 3261 * then devices/luns on a target, and then peripherals on a 3262 * device/lun. The "other" way is by the peripheral driver 3263 * lists. The peripheral driver lists are organized by 3264 * peripheral driver. (obviously) So it makes sense to 3265 * use the peripheral driver list if the user is looking 3266 * for something like "da1", or all "da" devices. If the 3267 * user is looking for something on a particular bus/target 3268 * or lun, it's generally better to go through the EDT tree. 3269 */ 3270 3271 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3272 position_type = cdm->pos.position_type; 3273 else { 3274 u_int i; 3275 3276 position_type = CAM_DEV_POS_NONE; 3277 3278 for (i = 0; i < cdm->num_patterns; i++) { 3279 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3280 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3281 position_type = CAM_DEV_POS_EDT; 3282 break; 3283 } 3284 } 3285 3286 if (cdm->num_patterns == 0) 3287 position_type = CAM_DEV_POS_EDT; 3288 else if (position_type == CAM_DEV_POS_NONE) 3289 position_type = CAM_DEV_POS_PDRV; 3290 } 3291 3292 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3293 case CAM_DEV_POS_EDT: 3294 ret = xptedtmatch(cdm); 3295 break; 3296 case CAM_DEV_POS_PDRV: 3297 ret = xptperiphlistmatch(cdm); 3298 break; 3299 default: 3300 cdm->status = CAM_DEV_MATCH_ERROR; 3301 break; 3302 } 3303 3304 splx(s); 3305 3306 if (cdm->status == CAM_DEV_MATCH_ERROR) 3307 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3308 else 3309 start_ccb->ccb_h.status = CAM_REQ_CMP; 3310 3311 break; 3312 } 3313 case XPT_SASYNC_CB: 3314 { 3315 struct ccb_setasync *csa; 3316 struct async_node *cur_entry; 3317 struct async_list *async_head; 3318 u_int32_t added; 3319 int s; 3320 3321 csa = &start_ccb->csa; 3322 added = csa->event_enable; 3323 async_head = &csa->ccb_h.path->device->asyncs; 3324 3325 /* 3326 * If there is already an entry for us, simply 3327 * update it. 
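 *
 * Registration normally comes from a peripheral driver doing
 * something like the following (a sketch; "dasync" stands in for the
 * driver's real callback):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = dasync;
 *	csa.callback_arg = NULL;
 *	xpt_action((union ccb *)&csa);
 *
 * Re-issuing the CCB with event_enable set to 0 for the same
 * callback/argument pair removes the registration again, as the code
 * below shows.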
3328 */ 3329 s = splcam(); 3330 cur_entry = SLIST_FIRST(async_head); 3331 while (cur_entry != NULL) { 3332 if ((cur_entry->callback_arg == csa->callback_arg) 3333 && (cur_entry->callback == csa->callback)) 3334 break; 3335 cur_entry = SLIST_NEXT(cur_entry, links); 3336 } 3337 3338 if (cur_entry != NULL) { 3339 /* 3340 * If the request has no flags set, 3341 * remove the entry. 3342 */ 3343 added &= ~cur_entry->event_enable; 3344 if (csa->event_enable == 0) { 3345 SLIST_REMOVE(async_head, cur_entry, 3346 async_node, links); 3347 csa->ccb_h.path->device->refcount--; 3348 free(cur_entry, M_DEVBUF); 3349 } else { 3350 cur_entry->event_enable = csa->event_enable; 3351 } 3352 } else { 3353 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 3354 M_NOWAIT); 3355 if (cur_entry == NULL) { 3356 splx(s); 3357 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 3358 break; 3359 } 3360 cur_entry->event_enable = csa->event_enable; 3361 cur_entry->callback_arg = csa->callback_arg; 3362 cur_entry->callback = csa->callback; 3363 SLIST_INSERT_HEAD(async_head, cur_entry, links); 3364 csa->ccb_h.path->device->refcount++; 3365 } 3366 3367 if ((added & AC_FOUND_DEVICE) != 0) { 3368 /* 3369 * Get this peripheral up to date with all 3370 * the currently existing devices. 3371 */ 3372 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 3373 } 3374 if ((added & AC_PATH_REGISTERED) != 0) { 3375 /* 3376 * Get this peripheral up to date with all 3377 * the currently existing busses. 3378 */ 3379 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 3380 } 3381 splx(s); 3382 start_ccb->ccb_h.status = CAM_REQ_CMP; 3383 break; 3384 } 3385 case XPT_REL_SIMQ: 3386 { 3387 struct ccb_relsim *crs; 3388 struct cam_ed *dev; 3389 int s; 3390 3391 crs = &start_ccb->crs; 3392 dev = crs->ccb_h.path->device; 3393 if (dev == NULL) { 3394 3395 crs->ccb_h.status = CAM_DEV_NOT_THERE; 3396 break; 3397 } 3398 3399 s = splcam(); 3400 3401 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 3402 3403 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 3404 3405 /* Don't ever go below one opening */ 3406 if (crs->openings > 0) { 3407 xpt_dev_ccbq_resize(crs->ccb_h.path, 3408 crs->openings); 3409 3410 if (bootverbose) { 3411 xpt_print_path(crs->ccb_h.path); 3412 printf("tagged openings " 3413 "now %d\n", 3414 crs->openings); 3415 } 3416 } 3417 } 3418 } 3419 3420 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 3421 3422 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 3423 3424 /* 3425 * Just extend the old timeout and decrement 3426 * the freeze count so that a single timeout 3427 * is sufficient for releasing the queue. 3428 */ 3429 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3430 untimeout(xpt_release_devq_timeout, 3431 dev, dev->c_handle); 3432 } else { 3433 3434 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3435 } 3436 3437 dev->c_handle = 3438 timeout(xpt_release_devq_timeout, 3439 dev, 3440 (crs->release_timeout * hz) / 1000); 3441 3442 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 3443 3444 } 3445 3446 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 3447 3448 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 3449 /* 3450 * Decrement the freeze count so that a single 3451 * completion is still sufficient to unfreeze 3452 * the queue. 
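 *
 * In each of these deferred-release cases the CAM_DEV_QFREEZE flag
 * on the requesting CCB does the bookkeeping: if the flag is still
 * clear when this handler finishes, the device queue is released
 * once immediately (see the xpt_release_devq() call below); if it is
 * set, that release is left to whichever deferred event (timeout,
 * command completion, or queue empty) the caller requested.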
3453 */ 3454 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3455 } else { 3456 3457 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 3458 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3459 } 3460 } 3461 3462 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 3463 3464 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3465 || (dev->ccbq.dev_active == 0)) { 3466 3467 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3468 } else { 3469 3470 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3471 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3472 } 3473 } 3474 splx(s); 3475 3476 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3477 3478 xpt_release_devq(crs->ccb_h.path, /*count*/1, 3479 /*run_queue*/TRUE); 3480 } 3481 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3482 start_ccb->ccb_h.status = CAM_REQ_CMP; 3483 break; 3484 } 3485 case XPT_SCAN_BUS: 3486 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3487 break; 3488 case XPT_SCAN_LUN: 3489 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3490 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3491 start_ccb); 3492 break; 3493 case XPT_DEBUG: { 3494 #ifdef CAMDEBUG 3495 int s; 3496 3497 s = splcam(); 3498 #ifdef CAM_DEBUG_DELAY 3499 cam_debug_delay = CAM_DEBUG_DELAY; 3500 #endif 3501 cam_dflags = start_ccb->cdbg.flags; 3502 if (cam_dpath != NULL) { 3503 xpt_free_path(cam_dpath); 3504 cam_dpath = NULL; 3505 } 3506 3507 if (cam_dflags != CAM_DEBUG_NONE) { 3508 if (xpt_create_path(&cam_dpath, xpt_periph, 3509 start_ccb->ccb_h.path_id, 3510 start_ccb->ccb_h.target_id, 3511 start_ccb->ccb_h.target_lun) != 3512 CAM_REQ_CMP) { 3513 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3514 cam_dflags = CAM_DEBUG_NONE; 3515 } else { 3516 start_ccb->ccb_h.status = CAM_REQ_CMP; 3517 xpt_print_path(cam_dpath); 3518 printf("debugging flags now %x\n", cam_dflags); 3519 } 3520 } else { 3521 cam_dpath = NULL; 3522 start_ccb->ccb_h.status = CAM_REQ_CMP; 3523 } 3524 splx(s); 3525 #else /* !CAMDEBUG */ 3526 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3527 #endif /* CAMDEBUG */ 3528 break; 3529 } 3530 case XPT_NOOP: 3531 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3532 xpt_freeze_devq(start_ccb->ccb_h.path, 1); 3533 start_ccb->ccb_h.status = CAM_REQ_CMP; 3534 break; 3535 default: 3536 case XPT_SDEV_TYPE: 3537 case XPT_TERM_IO: 3538 case XPT_ENG_INQ: 3539 /* XXX Implement */ 3540 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3541 break; 3542 } 3543 splx(iopl); 3544 } 3545 3546 void 3547 xpt_polled_action(union ccb *start_ccb) 3548 { 3549 int s; 3550 u_int32_t timeout; 3551 struct cam_sim *sim; 3552 struct cam_devq *devq; 3553 struct cam_ed *dev; 3554 3555 timeout = start_ccb->ccb_h.timeout; 3556 sim = start_ccb->ccb_h.path->bus->sim; 3557 devq = sim->devq; 3558 dev = start_ccb->ccb_h.path->device; 3559 3560 s = splcam(); 3561 3562 /* 3563 * Steal an opening so that no other queued requests 3564 * can get it before us while we simulate interrupts. 
3565 */
3566 dev->ccbq.devq_openings--;
3567 dev->ccbq.dev_openings--;
3568
3569 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3570 && (--timeout > 0)) {
3571 DELAY(1000);
3572 (*(sim->sim_poll))(sim);
3573 camisr(&cam_netq);
3574 camisr(&cam_bioq);
3575 }
3576
3577 dev->ccbq.devq_openings++;
3578 dev->ccbq.dev_openings++;
3579
3580 if (timeout != 0) {
3581 xpt_action(start_ccb);
3582 while(--timeout > 0) {
3583 (*(sim->sim_poll))(sim);
3584 camisr(&cam_netq);
3585 camisr(&cam_bioq);
3586 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3587 != CAM_REQ_INPROG)
3588 break;
3589 DELAY(1000);
3590 }
3591 if (timeout == 0) {
3592 /*
3593 * XXX Is it worth adding a sim_timeout entry
3594 * point so we can attempt recovery? If
3595 * this is only used for dumps, I don't think
3596 * it is.
3597 */
3598 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3599 }
3600 } else {
3601 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3602 }
3603 splx(s);
3604 }
3605
3606 /*
3607 * Schedule a peripheral driver to receive a ccb when its
3608 * target device has space for more transactions.
3609 */
3610 void
3611 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3612 {
3613 struct cam_ed *device;
3614 int s;
3615 int runq;
3616
3617 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3618 device = perph->path->device;
3619 s = splsoftcam();
3620 if (periph_is_queued(perph)) {
3621 /* Simply reorder based on new priority */
3622 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3623 (" change priority to %d\n", new_priority));
3624 if (new_priority < perph->pinfo.priority) {
3625 camq_change_priority(&device->drvq,
3626 perph->pinfo.index,
3627 new_priority);
3628 }
3629 runq = 0;
3630 } else {
3631 /* New entry on the queue */
3632 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3633 (" added periph to queue\n"));
3634 perph->pinfo.priority = new_priority;
3635 perph->pinfo.generation = ++device->drvq.generation;
3636 camq_insert(&device->drvq, &perph->pinfo);
3637 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3638 }
3639 splx(s);
3640 if (runq != 0) {
3641 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3642 (" calling xpt_run_devq\n"));
3643 xpt_run_dev_allocq(perph->path->bus);
3644 }
3645 }
3646
3647
3648 /*
3649 * Schedule a device to run on a given queue.
3650 * If the device was inserted as a new entry on the queue,
3651 * return 1 meaning the device queue should be run. If we
3652 * were already queued, implying someone else has already
3653 * started the queue, return 0 so the caller doesn't attempt
3654 * to run the queue. Must be run at splsoftcam
3655 * (or splcam, since that encompasses splsoftcam).
3656 */
3657 static int
3658 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3659 u_int32_t new_priority)
3660 {
3661 int retval;
3662 u_int32_t old_priority;
3663
3664 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3665
3666 old_priority = pinfo->priority;
3667
3668 /*
3669 * Are we already queued?
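 * CAM_UNQUEUED_INDEX is the sentinel meaning "not on any queue";
 * any other value is the entry's current slot in the camq array,
 * which is kept heap-ordered so that numerically lower priority
 * values are serviced first.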
3670 */ 3671 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3672 /* Simply reorder based on new priority */ 3673 if (new_priority < old_priority) { 3674 camq_change_priority(queue, pinfo->index, 3675 new_priority); 3676 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3677 ("changed priority to %d\n", 3678 new_priority)); 3679 } 3680 retval = 0; 3681 } else { 3682 /* New entry on the queue */ 3683 if (new_priority < old_priority) 3684 pinfo->priority = new_priority; 3685 3686 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3687 ("Inserting onto queue\n")); 3688 pinfo->generation = ++queue->generation; 3689 camq_insert(queue, pinfo); 3690 retval = 1; 3691 } 3692 return (retval); 3693 } 3694 3695 static void 3696 xpt_run_dev_allocq(struct cam_eb *bus) 3697 { 3698 struct cam_devq *devq; 3699 int s; 3700 3701 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3702 devq = bus->sim->devq; 3703 3704 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3705 (" qfrozen_cnt == 0x%x, entries == %d, " 3706 "openings == %d, active == %d\n", 3707 devq->alloc_queue.qfrozen_cnt, 3708 devq->alloc_queue.entries, 3709 devq->alloc_openings, 3710 devq->alloc_active)); 3711 3712 s = splsoftcam(); 3713 devq->alloc_queue.qfrozen_cnt++; 3714 while ((devq->alloc_queue.entries > 0) 3715 && (devq->alloc_openings > 0) 3716 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3717 struct cam_ed_qinfo *qinfo; 3718 struct cam_ed *device; 3719 union ccb *work_ccb; 3720 struct cam_periph *drv; 3721 struct camq *drvq; 3722 3723 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3724 CAMQ_HEAD); 3725 device = qinfo->device; 3726 3727 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3728 ("running device %p\n", device)); 3729 3730 drvq = &device->drvq; 3731 3732 #ifdef CAMDEBUG 3733 if (drvq->entries <= 0) { 3734 panic("xpt_run_dev_allocq: " 3735 "Device on queue without any work to do"); 3736 } 3737 #endif 3738 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3739 devq->alloc_openings--; 3740 devq->alloc_active++; 3741 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 3742 splx(s); 3743 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3744 drv->pinfo.priority); 3745 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3746 ("calling periph start\n")); 3747 drv->periph_start(drv, work_ccb); 3748 } else { 3749 /* 3750 * Malloc failure in alloc_ccb 3751 */ 3752 /* 3753 * XXX add us to a list to be run from free_ccb 3754 * if we don't have any ccbs active on this 3755 * device queue otherwise we may never get run 3756 * again. 3757 */ 3758 break; 3759 } 3760 3761 /* Raise IPL for possible insertion and test at top of loop */ 3762 s = splsoftcam(); 3763 3764 if (drvq->entries > 0) { 3765 /* We have more work. 
Attempt to reschedule */ 3766 xpt_schedule_dev_allocq(bus, device); 3767 } 3768 } 3769 devq->alloc_queue.qfrozen_cnt--; 3770 splx(s); 3771 } 3772 3773 static void 3774 xpt_run_dev_sendq(struct cam_eb *bus) 3775 { 3776 struct cam_devq *devq; 3777 int s; 3778 3779 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3780 3781 devq = bus->sim->devq; 3782 3783 s = splcam(); 3784 devq->send_queue.qfrozen_cnt++; 3785 splx(s); 3786 s = splsoftcam(); 3787 while ((devq->send_queue.entries > 0) 3788 && (devq->send_openings > 0)) { 3789 struct cam_ed_qinfo *qinfo; 3790 struct cam_ed *device; 3791 union ccb *work_ccb; 3792 struct cam_sim *sim; 3793 int ospl; 3794 3795 ospl = splcam(); 3796 if (devq->send_queue.qfrozen_cnt > 1) { 3797 splx(ospl); 3798 break; 3799 } 3800 3801 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3802 CAMQ_HEAD); 3803 device = qinfo->device; 3804 3805 /* 3806 * If the device has been "frozen", don't attempt 3807 * to run it. 3808 */ 3809 if (device->qfrozen_cnt > 0) { 3810 splx(ospl); 3811 continue; 3812 } 3813 3814 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3815 ("running device %p\n", device)); 3816 3817 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3818 if (work_ccb == NULL) { 3819 printf("device on run queue with no ccbs???"); 3820 splx(ospl); 3821 continue; 3822 } 3823 3824 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3825 3826 if (num_highpower <= 0) { 3827 /* 3828 * We got a high power command, but we 3829 * don't have any available slots. Freeze 3830 * the device queue until we have a slot 3831 * available. 3832 */ 3833 device->qfrozen_cnt++; 3834 STAILQ_INSERT_TAIL(&highpowerq, 3835 &work_ccb->ccb_h, 3836 xpt_links.stqe); 3837 3838 splx(ospl); 3839 continue; 3840 } else { 3841 /* 3842 * Consume a high power slot while 3843 * this ccb runs. 3844 */ 3845 num_highpower--; 3846 } 3847 } 3848 devq->active_dev = device; 3849 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3850 3851 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3852 splx(ospl); 3853 3854 devq->send_openings--; 3855 devq->send_active++; 3856 3857 if (device->ccbq.queue.entries > 0) 3858 xpt_schedule_dev_sendq(bus, device); 3859 3860 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3861 /* 3862 * The client wants to freeze the queue 3863 * after this CCB is sent. 3864 */ 3865 ospl = splcam(); 3866 device->qfrozen_cnt++; 3867 splx(ospl); 3868 } 3869 3870 splx(s); 3871 3872 /* In Target mode, the peripheral driver knows best... */ 3873 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3874 if ((device->inq_flags & SID_CmdQue) != 0 3875 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3876 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3877 else 3878 /* 3879 * Clear this in case of a retried CCB that 3880 * failed due to a rejected tag. 3881 */ 3882 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3883 } 3884 3885 /* 3886 * Device queues can be shared among multiple sim instances 3887 * that reside on different busses. Use the SIM in the queue 3888 * CCB's path, rather than the one in the bus that was passed 3889 * into this function. 
3890 */
3891 sim = work_ccb->ccb_h.path->bus->sim;
3892 (*(sim->sim_action))(sim, work_ccb);
3893
3894 ospl = splcam();
3895 devq->active_dev = NULL;
3896 splx(ospl);
3897 /* Raise IPL for possible insertion and test at top of loop */
3898 s = splsoftcam();
3899 }
3900 splx(s);
3901 s = splcam();
3902 devq->send_queue.qfrozen_cnt--;
3903 splx(s);
3904 }
3905
3906 /*
3907 * This function merges stuff from the slave ccb into the master ccb, while
3908 * keeping important fields in the master ccb constant.
3909 */
3910 void
3911 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3912 {
3913 /*
3914 * Pull fields that are valid for peripheral drivers to set
3915 * into the master CCB along with the CCB "payload".
3916 */
3917 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3918 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3919 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3920 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3921 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3922 sizeof(union ccb) - sizeof(struct ccb_hdr));
3923 }
3924
3925 void
3926 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3927 {
3928 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3929 ccb_h->pinfo.priority = priority;
3930 ccb_h->path = path;
3931 ccb_h->path_id = path->bus->path_id;
3932 if (path->target)
3933 ccb_h->target_id = path->target->target_id;
3934 else
3935 ccb_h->target_id = CAM_TARGET_WILDCARD;
3936 if (path->device) {
3937 ccb_h->target_lun = path->device->lun_id;
3938 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3939 } else {
3940 ccb_h->target_lun = CAM_LUN_WILDCARD;
3941 }
3942 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3943 ccb_h->flags = 0;
3944 }
3945
3946 /* Path manipulation functions */
3947 cam_status
3948 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3949 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3950 {
3951 struct cam_path *path;
3952 cam_status status;
3953
3954 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT);
3955
3956 if (path == NULL) {
3957 status = CAM_RESRC_UNAVAIL;
3958 return(status);
3959 }
3960 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3961 if (status != CAM_REQ_CMP) {
3962 free(path, M_DEVBUF);
3963 path = NULL;
3964 }
3965 *new_path_ptr = path;
3966 return (status);
3967 }
3968
3969 static cam_status
3970 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3971 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3972 {
3973 struct cam_eb *bus;
3974 struct cam_et *target;
3975 struct cam_ed *device;
3976 cam_status status;
3977 int s;
3978
3979 status = CAM_REQ_CMP; /* Completed without error */
3980 target = NULL; /* Wildcarded */
3981 device = NULL; /* Wildcarded */
3982
3983 /*
3984 * We will potentially modify the EDT, so block interrupts
3985 * that may attempt to create cam paths.
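 *
 * Most code reaches this routine through xpt_create_path(). A
 * typical caller looks roughly like this (a sketch, with error
 * handling elided and the ID variables standing in for real ones):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id)
 *	    != CAM_REQ_CMP)
 *		return;
 *	(use the path, e.g. hand it to xpt_setup_ccb())
 *	xpt_free_path(path);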
3986 */ 3987 s = splcam(); 3988 bus = xpt_find_bus(path_id); 3989 if (bus == NULL) { 3990 status = CAM_PATH_INVALID; 3991 } else { 3992 target = xpt_find_target(bus, target_id); 3993 if (target == NULL) { 3994 /* Create one */ 3995 struct cam_et *new_target; 3996 3997 new_target = xpt_alloc_target(bus, target_id); 3998 if (new_target == NULL) { 3999 status = CAM_RESRC_UNAVAIL; 4000 } else { 4001 target = new_target; 4002 } 4003 } 4004 if (target != NULL) { 4005 device = xpt_find_device(target, lun_id); 4006 if (device == NULL) { 4007 /* Create one */ 4008 struct cam_ed *new_device; 4009 4010 new_device = xpt_alloc_device(bus, 4011 target, 4012 lun_id); 4013 if (new_device == NULL) { 4014 status = CAM_RESRC_UNAVAIL; 4015 } else { 4016 device = new_device; 4017 } 4018 } 4019 } 4020 } 4021 splx(s); 4022 4023 /* 4024 * Only touch the user's data if we are successful. 4025 */ 4026 if (status == CAM_REQ_CMP) { 4027 new_path->periph = perph; 4028 new_path->bus = bus; 4029 new_path->target = target; 4030 new_path->device = device; 4031 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 4032 } else { 4033 if (device != NULL) 4034 xpt_release_device(bus, target, device); 4035 if (target != NULL) 4036 xpt_release_target(bus, target); 4037 if (bus != NULL) 4038 xpt_release_bus(bus); 4039 } 4040 return (status); 4041 } 4042 4043 static void 4044 xpt_release_path(struct cam_path *path) 4045 { 4046 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 4047 if (path->device != NULL) { 4048 xpt_release_device(path->bus, path->target, path->device); 4049 path->device = NULL; 4050 } 4051 if (path->target != NULL) { 4052 xpt_release_target(path->bus, path->target); 4053 path->target = NULL; 4054 } 4055 if (path->bus != NULL) { 4056 xpt_release_bus(path->bus); 4057 path->bus = NULL; 4058 } 4059 } 4060 4061 void 4062 xpt_free_path(struct cam_path *path) 4063 { 4064 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 4065 xpt_release_path(path); 4066 free(path, M_DEVBUF); 4067 } 4068 4069 4070 /* 4071 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 4072 * in path1, 2 for match with wildcards in path2. 
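 *
 * A minimal illustration of how a caller can consume these return
 * values (the helper below is hypothetical and is not part of this
 * file):
 */
#if 0
static int
example_path_overlaps(struct cam_path *filter, struct cam_path *path)
{
	/* -1 is the only "no overlap" answer; 0, 1 and 2 all match. */
	return (xpt_path_comp(filter, path) != -1);
}
#endif
/*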
4073 */
4074 int
4075 xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4076 {
4077 	int retval = 0;
4078 
4079 	if (path1->bus != path2->bus) {
4080 		if (path1->bus->path_id == CAM_BUS_WILDCARD)
4081 			retval = 1;
4082 		else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4083 			retval = 2;
4084 		else
4085 			return (-1);
4086 	}
4087 	if (path1->target != path2->target) {
4088 		if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4089 			if (retval == 0)
4090 				retval = 1;
4091 		} else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4092 			retval = 2;
4093 		else
4094 			return (-1);
4095 	}
4096 	if (path1->device != path2->device) {
4097 		if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4098 			if (retval == 0)
4099 				retval = 1;
4100 		} else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4101 			retval = 2;
4102 		else
4103 			return (-1);
4104 	}
4105 	return (retval);
4106 }
4107 
4108 void
4109 xpt_print_path(struct cam_path *path)
4110 {
4111 	if (path == NULL)
4112 		printf("(nopath): ");
4113 	else {
4114 		if (path->periph != NULL)
4115 			printf("(%s%d:", path->periph->periph_name,
4116 			       path->periph->unit_number);
4117 		else
4118 			printf("(noperiph:");
4119 
4120 		if (path->bus != NULL)
4121 			printf("%s%d:%d:", path->bus->sim->sim_name,
4122 			       path->bus->sim->unit_number,
4123 			       path->bus->sim->bus_id);
4124 		else
4125 			printf("nobus:");
4126 
4127 		if (path->target != NULL)
4128 			printf("%d:", path->target->target_id);
4129 		else
4130 			printf("X:");
4131 
4132 		if (path->device != NULL)
4133 			printf("%d): ", path->device->lun_id);
4134 		else
4135 			printf("X): ");
4136 	}
4137 }
4138 
4139 int
4140 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4141 {
4142 	struct sbuf sb;
4143 
4144 	sbuf_new(&sb, str, str_len, 0);
4145 
4146 	if (path == NULL)
4147 		sbuf_printf(&sb, "(nopath): ");
4148 	else {
4149 		if (path->periph != NULL)
4150 			sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4151 				    path->periph->unit_number);
4152 		else
4153 			sbuf_printf(&sb, "(noperiph:");
4154 
4155 		if (path->bus != NULL)
4156 			sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4157 				    path->bus->sim->unit_number,
4158 				    path->bus->sim->bus_id);
4159 		else
4160 			sbuf_printf(&sb, "nobus:");
4161 
4162 		if (path->target != NULL)
4163 			sbuf_printf(&sb, "%d:", path->target->target_id);
4164 		else
4165 			sbuf_printf(&sb, "X:");
4166 
4167 		if (path->device != NULL)
4168 			sbuf_printf(&sb, "%d): ", path->device->lun_id);
4169 		else
4170 			sbuf_printf(&sb, "X): ");
4171 	}
4172 	sbuf_finish(&sb);
4173 
4174 	return(sbuf_len(&sb));
4175 }
4176 
4177 path_id_t
4178 xpt_path_path_id(struct cam_path *path)
4179 {
4180 	return(path->bus->path_id);
4181 }
4182 
4183 target_id_t
4184 xpt_path_target_id(struct cam_path *path)
4185 {
4186 	if (path->target != NULL)
4187 		return (path->target->target_id);
4188 	else
4189 		return (CAM_TARGET_WILDCARD);
4190 }
4191 
4192 lun_id_t
4193 xpt_path_lun_id(struct cam_path *path)
4194 {
4195 	if (path->device != NULL)
4196 		return (path->device->lun_id);
4197 	else
4198 		return (CAM_LUN_WILDCARD);
4199 }
4200 
4201 struct cam_sim *
4202 xpt_path_sim(struct cam_path *path)
4203 {
4204 	return (path->bus->sim);
4205 }
4206 
4207 struct cam_periph*
4208 xpt_path_periph(struct cam_path *path)
4209 {
4210 	return (path->periph);
4211 }
4212 
4213 /*
4214  * Release a CAM control block for the caller.  Remit the cost of the structure
4215  * to the device referenced by the path.  If this device had no 'credits'
4216  * and peripheral drivers have registered async callbacks for this notification,
4217  * call them now.
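 *
 * A minimal sketch of the usual caller, assuming a hypothetical
 * peripheral driver completion routine (illustrative only, never
 * compiled):
 */
#if 0
static void
exampledone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* ...inspect done_ccb->ccb_h.status and harvest any data... */

	/* Return the CCB; its 'credit' goes back to path->device. */
	xpt_release_ccb(done_ccb);
}
#endif
/*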
4218 */
4219 void
4220 xpt_release_ccb(union ccb *free_ccb)
4221 {
4222 	int s;
4223 	struct cam_path *path;
4224 	struct cam_ed *device;
4225 	struct cam_eb *bus;
4226 
4227 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4228 	path = free_ccb->ccb_h.path;
4229 	device = path->device;
4230 	bus = path->bus;
4231 	s = splsoftcam();
4232 	cam_ccbq_release_opening(&device->ccbq);
4233 	if (xpt_ccb_count > xpt_max_ccbs) {
4234 		xpt_free_ccb(free_ccb);
4235 		xpt_ccb_count--;
4236 	} else {
4237 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
4238 	}
4239 	bus->sim->devq->alloc_openings++;
4240 	bus->sim->devq->alloc_active--;
4241 	/* XXX Turn this into an inline function - xpt_run_device?? */
4242 	if ((device_is_alloc_queued(device) == 0)
4243 	 && (device->drvq.entries > 0)) {
4244 		xpt_schedule_dev_allocq(bus, device);
4245 	}
4246 	splx(s);
4247 	if (dev_allocq_is_runnable(bus->sim->devq))
4248 		xpt_run_dev_allocq(bus);
4249 }
4250 
4251 /* Functions accessed by SIM drivers */
4252 
4253 /*
4254  * A sim structure, listing the SIM entry points and instance
4255  * identification info, is passed to xpt_bus_register to hook the SIM
4256  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4257  * for this new bus and places it in the array of busses and assigns
4258  * it a path_id.  The path_id may be influenced by "hard wiring"
4259  * information specified by the user.  Once interrupt services are
4260  * available, the bus will be probed.
4261  */
4262 int32_t
4263 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4264 {
4265 	struct cam_eb *new_bus;
4266 	struct cam_eb *old_bus;
4267 	struct ccb_pathinq cpi;
4268 	int s;
4269 
4270 	sim->bus_id = bus;
4271 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4272 					  M_DEVBUF, M_NOWAIT);
4273 	if (new_bus == NULL) {
4274 		/* Couldn't satisfy request */
4275 		return (CAM_RESRC_UNAVAIL);
4276 	}
4277 
4278 	if (strcmp(sim->sim_name, "xpt") != 0) {
4279 
4280 		sim->path_id =
4281 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4282 	}
4283 
4284 	TAILQ_INIT(&new_bus->et_entries);
4285 	new_bus->path_id = sim->path_id;
4286 	new_bus->sim = sim;
4287 	timevalclear(&new_bus->last_reset);
4288 	new_bus->flags = 0;
4289 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4290 	new_bus->generation = 0;
4291 	s = splcam();
4292 	old_bus = TAILQ_FIRST(&xpt_busses);
4293 	while (old_bus != NULL
4294 	    && old_bus->path_id < new_bus->path_id)
4295 		old_bus = TAILQ_NEXT(old_bus, links);
4296 	if (old_bus != NULL)
4297 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4298 	else
4299 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4300 	bus_generation++;
4301 	splx(s);
4302 
4303 	/* Notify interested parties */
4304 	if (sim->path_id != CAM_XPT_PATH_ID) {
4305 		struct cam_path path;
4306 
4307 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4308 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4309 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4310 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4311 		xpt_action((union ccb *)&cpi);
4312 		xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4313 		xpt_release_path(&path);
4314 	}
4315 	return (CAM_SUCCESS);
4316 }
4317 
4318 int32_t
4319 xpt_bus_deregister(path_id_t pathid)
4320 {
4321 	struct cam_path bus_path;
4322 	cam_status status;
4323 
4324 	status = xpt_compile_path(&bus_path, NULL, pathid,
4325 				  CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4326 	if (status != CAM_REQ_CMP)
4327 		return (status);
4328 
4329 	xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4330 	xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4331 
4332 	/* Release the reference count held while 
registered. */ 4333 xpt_release_bus(bus_path.bus); 4334 xpt_release_path(&bus_path); 4335 4336 return (CAM_REQ_CMP); 4337 } 4338 4339 static path_id_t 4340 xptnextfreepathid(void) 4341 { 4342 struct cam_eb *bus; 4343 path_id_t pathid; 4344 const char *strval; 4345 4346 pathid = 0; 4347 bus = TAILQ_FIRST(&xpt_busses); 4348 retry: 4349 /* Find an unoccupied pathid */ 4350 while (bus != NULL 4351 && bus->path_id <= pathid) { 4352 if (bus->path_id == pathid) 4353 pathid++; 4354 bus = TAILQ_NEXT(bus, links); 4355 } 4356 4357 /* 4358 * Ensure that this pathid is not reserved for 4359 * a bus that may be registered in the future. 4360 */ 4361 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4362 ++pathid; 4363 /* Start the search over */ 4364 goto retry; 4365 } 4366 return (pathid); 4367 } 4368 4369 static path_id_t 4370 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4371 { 4372 path_id_t pathid; 4373 int i, dunit, val; 4374 char buf[32]; 4375 const char *dname; 4376 4377 pathid = CAM_XPT_PATH_ID; 4378 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4379 i = 0; 4380 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4381 if (strcmp(dname, "scbus")) { 4382 /* Avoid a bit of foot shooting. */ 4383 continue; 4384 } 4385 if (dunit < 0) /* unwired?! */ 4386 continue; 4387 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4388 if (sim_bus == val) { 4389 pathid = dunit; 4390 break; 4391 } 4392 } else if (sim_bus == 0) { 4393 /* Unspecified matches bus 0 */ 4394 pathid = dunit; 4395 break; 4396 } else { 4397 printf("Ambiguous scbus configuration for %s%d " 4398 "bus %d, cannot wire down. The kernel " 4399 "config entry for scbus%d should " 4400 "specify a controller bus.\n" 4401 "Scbus will be assigned dynamically.\n", 4402 sim_name, sim_unit, sim_bus, dunit); 4403 break; 4404 } 4405 } 4406 4407 if (pathid == CAM_XPT_PATH_ID) 4408 pathid = xptnextfreepathid(); 4409 return (pathid); 4410 } 4411 4412 void 4413 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4414 { 4415 struct cam_eb *bus; 4416 struct cam_et *target, *next_target; 4417 struct cam_ed *device, *next_device; 4418 int s; 4419 4420 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 4421 4422 /* 4423 * Most async events come from a CAM interrupt context. In 4424 * a few cases, the error recovery code at the peripheral layer, 4425 * which may run from our SWI or a process context, may signal 4426 * deferred events with a call to xpt_async. Ensure async 4427 * notifications are serialized by blocking cam interrupts. 
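	 *
	 * A sketch of such a deferred notification, assuming a
	 * hypothetical recovery routine that has decided a device is
	 * gone (illustrative only, never compiled):
	 */
#if 0
	/* "ccb" here stands for the failed command the caller owns. */
	xpt_async(AC_LOST_DEVICE, ccb->ccb_h.path, NULL);
#endif
	/*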
4428 */ 4429 s = splcam(); 4430 4431 bus = path->bus; 4432 4433 if (async_code == AC_BUS_RESET) { 4434 int s; 4435 4436 s = splclock(); 4437 /* Update our notion of when the last reset occurred */ 4438 microtime(&bus->last_reset); 4439 splx(s); 4440 } 4441 4442 for (target = TAILQ_FIRST(&bus->et_entries); 4443 target != NULL; 4444 target = next_target) { 4445 4446 next_target = TAILQ_NEXT(target, links); 4447 4448 if (path->target != target 4449 && path->target->target_id != CAM_TARGET_WILDCARD 4450 && target->target_id != CAM_TARGET_WILDCARD) 4451 continue; 4452 4453 if (async_code == AC_SENT_BDR) { 4454 int s; 4455 4456 /* Update our notion of when the last reset occurred */ 4457 s = splclock(); 4458 microtime(&path->target->last_reset); 4459 splx(s); 4460 } 4461 4462 for (device = TAILQ_FIRST(&target->ed_entries); 4463 device != NULL; 4464 device = next_device) { 4465 4466 next_device = TAILQ_NEXT(device, links); 4467 4468 if (path->device != device 4469 && path->device->lun_id != CAM_LUN_WILDCARD 4470 && device->lun_id != CAM_LUN_WILDCARD) 4471 continue; 4472 4473 xpt_dev_async(async_code, bus, target, 4474 device, async_arg); 4475 4476 xpt_async_bcast(&device->asyncs, async_code, 4477 path, async_arg); 4478 } 4479 } 4480 4481 /* 4482 * If this wasn't a fully wildcarded async, tell all 4483 * clients that want all async events. 4484 */ 4485 if (bus != xpt_periph->path->bus) 4486 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, 4487 path, async_arg); 4488 splx(s); 4489 } 4490 4491 static void 4492 xpt_async_bcast(struct async_list *async_head, 4493 u_int32_t async_code, 4494 struct cam_path *path, void *async_arg) 4495 { 4496 struct async_node *cur_entry; 4497 4498 cur_entry = SLIST_FIRST(async_head); 4499 while (cur_entry != NULL) { 4500 struct async_node *next_entry; 4501 /* 4502 * Grab the next list entry before we call the current 4503 * entry's callback. This is because the callback function 4504 * can delete its async callback entry. 4505 */ 4506 next_entry = SLIST_NEXT(cur_entry, links); 4507 if ((cur_entry->event_enable & async_code) != 0) 4508 cur_entry->callback(cur_entry->callback_arg, 4509 async_code, path, 4510 async_arg); 4511 cur_entry = next_entry; 4512 } 4513 } 4514 4515 /* 4516 * Handle any per-device event notifications that require action by the XPT. 4517 */ 4518 static void 4519 xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target, 4520 struct cam_ed *device, void *async_arg) 4521 { 4522 cam_status status; 4523 struct cam_path newpath; 4524 4525 /* 4526 * We only need to handle events for real devices. 4527 */ 4528 if (target->target_id == CAM_TARGET_WILDCARD 4529 || device->lun_id == CAM_LUN_WILDCARD) 4530 return; 4531 4532 /* 4533 * We need our own path with wildcards expanded to 4534 * handle certain types of events. 4535 */ 4536 if ((async_code == AC_SENT_BDR) 4537 || (async_code == AC_BUS_RESET) 4538 || (async_code == AC_INQ_CHANGED)) 4539 status = xpt_compile_path(&newpath, NULL, 4540 bus->path_id, 4541 target->target_id, 4542 device->lun_id); 4543 else 4544 status = CAM_REQ_CMP_ERR; 4545 4546 if (status == CAM_REQ_CMP) { 4547 4548 /* 4549 * Allow transfer negotiation to occur in a 4550 * tag free environment. 4551 */ 4552 if (async_code == AC_SENT_BDR 4553 || async_code == AC_BUS_RESET) 4554 xpt_toggle_tags(&newpath); 4555 4556 if (async_code == AC_INQ_CHANGED) { 4557 /* 4558 * We've sent a start unit command, or 4559 * something similar to a device that 4560 * may have caused its inquiry data to 4561 * change. 
So we re-scan the device to
4562 			 * refresh the inquiry data for it.
4563 			 */
4564 			xpt_scan_lun(newpath.periph, &newpath,
4565 				     CAM_EXPECT_INQ_CHANGE, NULL);
4566 		}
4567 		xpt_release_path(&newpath);
4568 	} else if (async_code == AC_LOST_DEVICE) {
4569 		device->flags |= CAM_DEV_UNCONFIGURED;
4570 	} else if (async_code == AC_TRANSFER_NEG) {
4571 		struct ccb_trans_settings *settings;
4572 
4573 		settings = (struct ccb_trans_settings *)async_arg;
4574 		xpt_set_transfer_settings(settings, device,
4575 					  /*async_update*/TRUE);
4576 	}
4577 }
4578 
4579 u_int32_t
4580 xpt_freeze_devq(struct cam_path *path, u_int count)
4581 {
4582 	int s;
4583 	struct ccb_hdr *ccbh;
4584 
4585 	s = splcam();
4586 	path->device->qfrozen_cnt += count;
4587 
4588 	/*
4589 	 * Mark the last CCB in the queue as needing
4590 	 * to be requeued if the driver hasn't
4591 	 * changed its state yet.  This fixes a race
4592 	 * where a ccb is just about to be queued to
4593 	 * a controller driver when its interrupt routine
4594 	 * freezes the queue.  To completely close the
4595 	 * hole, controller drivers must check to see
4596 	 * if a ccb's status is still CAM_REQ_INPROG
4597 	 * under spl protection just before they queue
4598 	 * the CCB.  See ahc_action/ahc_freeze_devq for
4599 	 * an example.
4600 	 */
4601 	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4602 	if (ccbh && ccbh->status == CAM_REQ_INPROG)
4603 		ccbh->status = CAM_REQUEUE_REQ;
4604 	splx(s);
4605 	return (path->device->qfrozen_cnt);
4606 }
4607 
4608 u_int32_t
4609 xpt_freeze_simq(struct cam_sim *sim, u_int count)
4610 {
4611 	sim->devq->send_queue.qfrozen_cnt += count;
4612 	if (sim->devq->active_dev != NULL) {
4613 		struct ccb_hdr *ccbh;
4614 
4615 		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4616 				  ccb_hdr_tailq);
4617 		if (ccbh && ccbh->status == CAM_REQ_INPROG)
4618 			ccbh->status = CAM_REQUEUE_REQ;
4619 	}
4620 	return (sim->devq->send_queue.qfrozen_cnt);
4621 }
4622 
4623 static void
4624 xpt_release_devq_timeout(void *arg)
4625 {
4626 	struct cam_ed *device;
4627 
4628 	device = (struct cam_ed *)arg;
4629 
4630 	xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4631 }
4632 
4633 void
4634 xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4635 {
4636 	xpt_release_devq_device(path->device, count, run_queue);
4637 }
4638 
4639 static void
4640 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4641 {
4642 	int rundevq;
4643 	int s0, s1;
4644 
4645 	rundevq = 0;
4646 	s0 = splsoftcam();
4647 	s1 = splcam();
4648 	if (dev->qfrozen_cnt > 0) {
4649 
4650 		count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4651 		dev->qfrozen_cnt -= count;
4652 		if (dev->qfrozen_cnt == 0) {
4653 
4654 			/*
4655 			 * No longer need to wait for a successful
4656 			 * command completion.
4657 			 */
4658 			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4659 
4660 			/*
4661 			 * Remove any timeouts that might be scheduled
4662 			 * to release this queue.
4663 			 */
4664 			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4665 				untimeout(xpt_release_devq_timeout, dev,
4666 					  dev->c_handle);
4667 				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4668 			}
4669 
4670 			/*
4671 			 * Now that we are unfrozen, schedule the
4672 			 * device so any pending transactions are
4673 			 * run.
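			 *
			 * For reference, the freeze half of this pairing
			 * looks like the following sketch (hypothetical
			 * driver error path, never compiled):
			 */
#if 0
	/* Hold new I/O off the device while recovering... */
	xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
	/* ...then undo it, letting anything queued run again: */
	xpt_release_devq(ccb->ccb_h.path, /*count*/1, /*run_queue*/TRUE);
#endif
			/*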
4674 */
4675 			if ((dev->ccbq.queue.entries > 0)
4676 			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4677 			 && (run_queue != 0)) {
4678 				rundevq = 1;
4679 			}
4680 		}
4681 	}
4682 	splx(s1);
4683 	if (rundevq != 0)
4684 		xpt_run_dev_sendq(dev->target->bus);
4685 	splx(s0);
4686 }
4687 
4688 void
4689 xpt_release_simq(struct cam_sim *sim, int run_queue)
4690 {
4691 	int s;
4692 	struct camq *sendq;
4693 
4694 	sendq = &(sim->devq->send_queue);
4695 	s = splcam();
4696 	if (sendq->qfrozen_cnt > 0) {
4697 
4698 		sendq->qfrozen_cnt--;
4699 		if (sendq->qfrozen_cnt == 0) {
4700 			struct cam_eb *bus;
4701 
4702 			/*
4703 			 * If there is a timeout scheduled to release this
4704 			 * sim queue, remove it.  The queue frozen count is
4705 			 * already at 0.
4706 			 */
4707 			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4708 				untimeout(xpt_release_simq_timeout, sim,
4709 					  sim->c_handle);
4710 				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4711 			}
4712 			bus = xpt_find_bus(sim->path_id);
4713 			splx(s);
4714 
4715 			if (run_queue) {
4716 				/*
4717 				 * Now that we are unfrozen, run the send queue.
4718 				 */
4719 				xpt_run_dev_sendq(bus);
4720 			}
4721 			xpt_release_bus(bus);
4722 		} else
4723 			splx(s);
4724 	} else
4725 		splx(s);
4726 }
4727 
4728 static void
4729 xpt_release_simq_timeout(void *arg)
4730 {
4731 	struct cam_sim *sim;
4732 
4733 	sim = (struct cam_sim *)arg;
4734 	xpt_release_simq(sim, /* run_queue */ TRUE);
4735 }
4736 
4737 void
4738 xpt_done(union ccb *done_ccb)
4739 {
4740 	int s;
4741 
4742 	s = splcam();
4743 
4744 	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4745 	if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4746 		/*
4747 		 * Queue up the request for handling by our SWI handler for
4748 		 * any of the "non-immediate" types of ccbs.
4749 		 */
4750 		switch (done_ccb->ccb_h.path->periph->type) {
4751 		case CAM_PERIPH_BIO:
4752 			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
4753 					  sim_links.tqe);
4754 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4755 			swi_sched(cambio_ih, SWI_NOSWITCH);
4756 			break;
4757 		case CAM_PERIPH_NET:
4758 			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
4759 					  sim_links.tqe);
4760 			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4761 			swi_sched(camnet_ih, SWI_NOSWITCH);
4762 			break;
4763 		}
4764 	}
4765 	splx(s);
4766 }
4767 
4768 union ccb *
4769 xpt_alloc_ccb()
4770 {
4771 	union ccb *new_ccb;
4772 
4773 	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
4774 	return (new_ccb);
4775 }
4776 
4777 void
4778 xpt_free_ccb(union ccb *free_ccb)
4779 {
4780 	free(free_ccb, M_DEVBUF);
4781 }
4782 
4783 
4784 
4785 /* Private XPT functions */
4786 
4787 /*
4788  * Get a CAM control block for the caller.  Charge the structure to the device
4789  * referenced by the path.  If this device has no 'credits' then the
4790  * device already has the maximum number of outstanding operations under way
4791  * and we return NULL.  If we don't have sufficient resources to allocate more
4792  * ccbs, we also return NULL.
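 *
 * A NULL return is back-pressure rather than a hard error; a sketch
 * of how a caller inside an allocation loop might treat it (assumed
 * context, never compiled):
 */
#if 0
	if ((new_ccb = xpt_get_ccb(device)) == NULL) {
		/*
		 * Out of openings or memory; leave the device queued
		 * and retry after xpt_release_ccb() frees a slot.
		 */
		break;
	}
#endif
/*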
4793 */ 4794 static union ccb * 4795 xpt_get_ccb(struct cam_ed *device) 4796 { 4797 union ccb *new_ccb; 4798 int s; 4799 4800 s = splsoftcam(); 4801 if ((new_ccb = (union ccb *)SLIST_FIRST(&ccb_freeq)) == NULL) { 4802 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT); 4803 if (new_ccb == NULL) { 4804 splx(s); 4805 return (NULL); 4806 } 4807 callout_handle_init(&new_ccb->ccb_h.timeout_ch); 4808 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h, 4809 xpt_links.sle); 4810 xpt_ccb_count++; 4811 } 4812 cam_ccbq_take_opening(&device->ccbq); 4813 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle); 4814 splx(s); 4815 return (new_ccb); 4816 } 4817 4818 static void 4819 xpt_release_bus(struct cam_eb *bus) 4820 { 4821 int s; 4822 4823 s = splcam(); 4824 if ((--bus->refcount == 0) 4825 && (TAILQ_FIRST(&bus->et_entries) == NULL)) { 4826 TAILQ_REMOVE(&xpt_busses, bus, links); 4827 bus_generation++; 4828 splx(s); 4829 free(bus, M_DEVBUF); 4830 } else 4831 splx(s); 4832 } 4833 4834 static struct cam_et * 4835 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4836 { 4837 struct cam_et *target; 4838 4839 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT); 4840 if (target != NULL) { 4841 struct cam_et *cur_target; 4842 4843 TAILQ_INIT(&target->ed_entries); 4844 target->bus = bus; 4845 target->target_id = target_id; 4846 target->refcount = 1; 4847 target->generation = 0; 4848 timevalclear(&target->last_reset); 4849 /* 4850 * Hold a reference to our parent bus so it 4851 * will not go away before we do. 4852 */ 4853 bus->refcount++; 4854 4855 /* Insertion sort into our bus's target list */ 4856 cur_target = TAILQ_FIRST(&bus->et_entries); 4857 while (cur_target != NULL && cur_target->target_id < target_id) 4858 cur_target = TAILQ_NEXT(cur_target, links); 4859 4860 if (cur_target != NULL) { 4861 TAILQ_INSERT_BEFORE(cur_target, target, links); 4862 } else { 4863 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4864 } 4865 bus->generation++; 4866 } 4867 return (target); 4868 } 4869 4870 static void 4871 xpt_release_target(struct cam_eb *bus, struct cam_et *target) 4872 { 4873 int s; 4874 4875 s = splcam(); 4876 if ((--target->refcount == 0) 4877 && (TAILQ_FIRST(&target->ed_entries) == NULL)) { 4878 TAILQ_REMOVE(&bus->et_entries, target, links); 4879 bus->generation++; 4880 splx(s); 4881 free(target, M_DEVBUF); 4882 xpt_release_bus(bus); 4883 } else 4884 splx(s); 4885 } 4886 4887 static struct cam_ed * 4888 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4889 { 4890 #ifdef CAM_NEW_TRAN_CODE 4891 struct cam_path path; 4892 #endif /* CAM_NEW_TRAN_CODE */ 4893 struct cam_ed *device; 4894 struct cam_devq *devq; 4895 cam_status status; 4896 4897 /* Make space for us in the device queue on our bus */ 4898 devq = bus->sim->devq; 4899 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); 4900 4901 if (status != CAM_REQ_CMP) { 4902 device = NULL; 4903 } else { 4904 device = (struct cam_ed *)malloc(sizeof(*device), 4905 M_DEVBUF, M_NOWAIT); 4906 } 4907 4908 if (device != NULL) { 4909 struct cam_ed *cur_device; 4910 4911 cam_init_pinfo(&device->alloc_ccb_entry.pinfo); 4912 device->alloc_ccb_entry.device = device; 4913 cam_init_pinfo(&device->send_ccb_entry.pinfo); 4914 device->send_ccb_entry.device = device; 4915 device->target = target; 4916 device->lun_id = lun_id; 4917 /* Initialize our queues */ 4918 if (camq_init(&device->drvq, 0) != 0) { 4919 free(device, M_DEVBUF); 4920 return (NULL); 4921 } 4922 if (cam_ccbq_init(&device->ccbq, 4923 
bus->sim->max_dev_openings) != 0) { 4924 camq_fini(&device->drvq); 4925 free(device, M_DEVBUF); 4926 return (NULL); 4927 } 4928 SLIST_INIT(&device->asyncs); 4929 SLIST_INIT(&device->periphs); 4930 device->generation = 0; 4931 device->owner = NULL; 4932 /* 4933 * Take the default quirk entry until we have inquiry 4934 * data and can determine a better quirk to use. 4935 */ 4936 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1]; 4937 bzero(&device->inq_data, sizeof(device->inq_data)); 4938 device->inq_flags = 0; 4939 device->queue_flags = 0; 4940 device->serial_num = NULL; 4941 device->serial_num_len = 0; 4942 device->qfrozen_cnt = 0; 4943 device->flags = CAM_DEV_UNCONFIGURED; 4944 device->tag_delay_count = 0; 4945 device->refcount = 1; 4946 callout_handle_init(&device->c_handle); 4947 4948 /* 4949 * Hold a reference to our parent target so it 4950 * will not go away before we do. 4951 */ 4952 target->refcount++; 4953 4954 /* 4955 * XXX should be limited by number of CCBs this bus can 4956 * do. 4957 */ 4958 xpt_max_ccbs += device->ccbq.devq_openings; 4959 /* Insertion sort into our target's device list */ 4960 cur_device = TAILQ_FIRST(&target->ed_entries); 4961 while (cur_device != NULL && cur_device->lun_id < lun_id) 4962 cur_device = TAILQ_NEXT(cur_device, links); 4963 if (cur_device != NULL) { 4964 TAILQ_INSERT_BEFORE(cur_device, device, links); 4965 } else { 4966 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4967 } 4968 target->generation++; 4969 #ifdef CAM_NEW_TRAN_CODE 4970 if (lun_id != CAM_LUN_WILDCARD) { 4971 xpt_compile_path(&path, 4972 NULL, 4973 bus->path_id, 4974 target->target_id, 4975 lun_id); 4976 xpt_devise_transport(&path); 4977 xpt_release_path(&path); 4978 } 4979 #endif /* CAM_NEW_TRAN_CODE */ 4980 } 4981 return (device); 4982 } 4983 4984 static void 4985 xpt_release_device(struct cam_eb *bus, struct cam_et *target, 4986 struct cam_ed *device) 4987 { 4988 int s; 4989 4990 s = splcam(); 4991 if ((--device->refcount == 0) 4992 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) { 4993 struct cam_devq *devq; 4994 4995 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX 4996 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) 4997 panic("Removing device while still queued for ccbs"); 4998 4999 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 5000 untimeout(xpt_release_devq_timeout, device, 5001 device->c_handle); 5002 5003 TAILQ_REMOVE(&target->ed_entries, device,links); 5004 target->generation++; 5005 xpt_max_ccbs -= device->ccbq.devq_openings; 5006 /* Release our slot in the devq */ 5007 devq = bus->sim->devq; 5008 cam_devq_resize(devq, devq->alloc_queue.array_size - 1); 5009 splx(s); 5010 free(device, M_DEVBUF); 5011 xpt_release_target(bus, target); 5012 } else 5013 splx(s); 5014 } 5015 5016 static u_int32_t 5017 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 5018 { 5019 int s; 5020 int diff; 5021 int result; 5022 struct cam_ed *dev; 5023 5024 dev = path->device; 5025 s = splsoftcam(); 5026 5027 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 5028 result = cam_ccbq_resize(&dev->ccbq, newopenings); 5029 if (result == CAM_REQ_CMP && (diff < 0)) { 5030 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 5031 } 5032 /* Adjust the global limit */ 5033 xpt_max_ccbs += diff; 5034 splx(s); 5035 return (result); 5036 } 5037 5038 static struct cam_eb * 5039 xpt_find_bus(path_id_t path_id) 5040 { 5041 struct cam_eb *bus; 5042 5043 for (bus = TAILQ_FIRST(&xpt_busses); 5044 bus != NULL; 5045 bus = TAILQ_NEXT(bus, 
links)) { 5046 if (bus->path_id == path_id) { 5047 bus->refcount++; 5048 break; 5049 } 5050 } 5051 return (bus); 5052 } 5053 5054 static struct cam_et * 5055 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 5056 { 5057 struct cam_et *target; 5058 5059 for (target = TAILQ_FIRST(&bus->et_entries); 5060 target != NULL; 5061 target = TAILQ_NEXT(target, links)) { 5062 if (target->target_id == target_id) { 5063 target->refcount++; 5064 break; 5065 } 5066 } 5067 return (target); 5068 } 5069 5070 static struct cam_ed * 5071 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 5072 { 5073 struct cam_ed *device; 5074 5075 for (device = TAILQ_FIRST(&target->ed_entries); 5076 device != NULL; 5077 device = TAILQ_NEXT(device, links)) { 5078 if (device->lun_id == lun_id) { 5079 device->refcount++; 5080 break; 5081 } 5082 } 5083 return (device); 5084 } 5085 5086 typedef struct { 5087 union ccb *request_ccb; 5088 struct ccb_pathinq *cpi; 5089 int pending_count; 5090 } xpt_scan_bus_info; 5091 5092 /* 5093 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 5094 * As the scan progresses, xpt_scan_bus is used as the 5095 * callback on completion function. 5096 */ 5097 static void 5098 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 5099 { 5100 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5101 ("xpt_scan_bus\n")); 5102 switch (request_ccb->ccb_h.func_code) { 5103 case XPT_SCAN_BUS: 5104 { 5105 xpt_scan_bus_info *scan_info; 5106 union ccb *work_ccb; 5107 struct cam_path *path; 5108 u_int i; 5109 u_int max_target; 5110 u_int initiator_id; 5111 5112 /* Find out the characteristics of the bus */ 5113 work_ccb = xpt_alloc_ccb(); 5114 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 5115 request_ccb->ccb_h.pinfo.priority); 5116 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5117 xpt_action(work_ccb); 5118 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5119 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 5120 xpt_free_ccb(work_ccb); 5121 xpt_done(request_ccb); 5122 return; 5123 } 5124 5125 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5126 /* 5127 * Can't scan the bus on an adapter that 5128 * cannot perform the initiator role. 5129 */ 5130 request_ccb->ccb_h.status = CAM_REQ_CMP; 5131 xpt_free_ccb(work_ccb); 5132 xpt_done(request_ccb); 5133 return; 5134 } 5135 5136 /* Save some state for use while we probe for devices */ 5137 scan_info = (xpt_scan_bus_info *) 5138 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 5139 scan_info->request_ccb = request_ccb; 5140 scan_info->cpi = &work_ccb->cpi; 5141 5142 /* Cache on our stack so we can work asynchronously */ 5143 max_target = scan_info->cpi->max_target; 5144 initiator_id = scan_info->cpi->initiator_id; 5145 5146 /* 5147 * Don't count the initiator if the 5148 * initiator is addressable. 
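		 *
		 * For example, a typical 8-id SPI bus reports
		 * max_target == 7 and initiator_id == 7, so the loop
		 * below probes ids 0 through 6 and pending_count
		 * starts at 7 rather than 8.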
5149 */
5150 		scan_info->pending_count = max_target + 1;
5151 		if (initiator_id <= max_target)
5152 			scan_info->pending_count--;
5153 
5154 		for (i = 0; i <= max_target; i++) {
5155 			cam_status status;
5156 			if (i == initiator_id)
5157 				continue;
5158 
5159 			status = xpt_create_path(&path, xpt_periph,
5160 						 request_ccb->ccb_h.path_id,
5161 						 i, 0);
5162 			if (status != CAM_REQ_CMP) {
5163 				printf("xpt_scan_bus: xpt_create_path failed"
5164 				       " with status %#x, bus scan halted\n",
5165 				       status);
5166 				break;
5167 			}
5168 			work_ccb = xpt_alloc_ccb();
5169 			xpt_setup_ccb(&work_ccb->ccb_h, path,
5170 				      request_ccb->ccb_h.pinfo.priority);
5171 			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5172 			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5173 			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5174 			work_ccb->crcn.flags = request_ccb->crcn.flags;
5175 			xpt_action(work_ccb);
5176 		}
5177 		break;
5178 	}
5179 	case XPT_SCAN_LUN:
5180 	{
5181 		xpt_scan_bus_info *scan_info;
5182 		path_id_t path_id;
5183 		target_id_t target_id;
5184 		lun_id_t lun_id;
5185 
5186 		/* Reuse the same CCB to query if a device was really found */
5187 		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5188 		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5189 			      request_ccb->ccb_h.pinfo.priority);
5190 		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5191 
5192 		path_id = request_ccb->ccb_h.path_id;
5193 		target_id = request_ccb->ccb_h.target_id;
5194 		lun_id = request_ccb->ccb_h.target_lun;
5195 		xpt_action(request_ccb);
5196 
5197 		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5198 			struct cam_ed *device;
5199 			struct cam_et *target;
5200 			int s, phl;
5201 
5202 			/*
5203 			 * If we already probed lun 0 successfully, or
5204 			 * we have additional configured luns on this
5205 			 * target that might have "gone away", go on to
5206 			 * the next lun.
5207 			 */
5208 			target = request_ccb->ccb_h.path->target;
5209 			/*
5210 			 * We may touch devices that we don't
5211 			 * hold references to, so ensure they
5212 			 * don't disappear out from under us.
5213 			 * The target above is referenced by the
5214 			 * path in the request ccb.
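			 *
			 * Note the CAM_QUIRK_HILUNS test below as well:
			 * when the lun 0 device carries that quirk, the
			 * scan keeps advancing past lun
			 * CAM_SCSI2_MAXLUN - 1 instead of stopping at
			 * the SCSI-2 limit.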
5215 */ 5216 phl = 0; 5217 s = splcam(); 5218 device = TAILQ_FIRST(&target->ed_entries); 5219 if (device != NULL) { 5220 phl = device->quirk->quirks & CAM_QUIRK_HILUNS; 5221 if (device->lun_id == 0) 5222 device = TAILQ_NEXT(device, links); 5223 } 5224 splx(s); 5225 if ((lun_id != 0) || (device != NULL)) { 5226 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) 5227 lun_id++; 5228 } 5229 } else { 5230 struct cam_ed *device; 5231 5232 device = request_ccb->ccb_h.path->device; 5233 5234 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 5235 /* Try the next lun */ 5236 if (lun_id < (CAM_SCSI2_MAXLUN-1) || 5237 (device->quirk->quirks & CAM_QUIRK_HILUNS)) 5238 lun_id++; 5239 } 5240 } 5241 5242 xpt_free_path(request_ccb->ccb_h.path); 5243 5244 /* Check Bounds */ 5245 if ((lun_id == request_ccb->ccb_h.target_lun) 5246 || lun_id > scan_info->cpi->max_lun) { 5247 /* We're done */ 5248 5249 xpt_free_ccb(request_ccb); 5250 scan_info->pending_count--; 5251 if (scan_info->pending_count == 0) { 5252 xpt_free_ccb((union ccb *)scan_info->cpi); 5253 request_ccb = scan_info->request_ccb; 5254 free(scan_info, M_TEMP); 5255 request_ccb->ccb_h.status = CAM_REQ_CMP; 5256 xpt_done(request_ccb); 5257 } 5258 } else { 5259 /* Try the next device */ 5260 struct cam_path *path; 5261 cam_status status; 5262 5263 path = request_ccb->ccb_h.path; 5264 status = xpt_create_path(&path, xpt_periph, 5265 path_id, target_id, lun_id); 5266 if (status != CAM_REQ_CMP) { 5267 printf("xpt_scan_bus: xpt_create_path failed " 5268 "with status %#x, halting LUN scan\n", 5269 status); 5270 xpt_free_ccb(request_ccb); 5271 scan_info->pending_count--; 5272 if (scan_info->pending_count == 0) { 5273 xpt_free_ccb( 5274 (union ccb *)scan_info->cpi); 5275 request_ccb = scan_info->request_ccb; 5276 free(scan_info, M_TEMP); 5277 request_ccb->ccb_h.status = CAM_REQ_CMP; 5278 xpt_done(request_ccb); 5279 break; 5280 } 5281 } 5282 xpt_setup_ccb(&request_ccb->ccb_h, path, 5283 request_ccb->ccb_h.pinfo.priority); 5284 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5285 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5286 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 5287 request_ccb->crcn.flags = 5288 scan_info->request_ccb->crcn.flags; 5289 xpt_action(request_ccb); 5290 } 5291 break; 5292 } 5293 default: 5294 break; 5295 } 5296 } 5297 5298 typedef enum { 5299 PROBE_TUR, 5300 PROBE_INQUIRY, 5301 PROBE_FULL_INQUIRY, 5302 PROBE_MODE_SENSE, 5303 PROBE_SERIAL_NUM, 5304 PROBE_TUR_FOR_NEGOTIATION 5305 } probe_action; 5306 5307 typedef enum { 5308 PROBE_INQUIRY_CKSUM = 0x01, 5309 PROBE_SERIAL_CKSUM = 0x02, 5310 PROBE_NO_ANNOUNCE = 0x04 5311 } probe_flags; 5312 5313 typedef struct { 5314 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5315 probe_action action; 5316 union ccb saved_ccb; 5317 probe_flags flags; 5318 MD5_CTX context; 5319 u_int8_t digest[16]; 5320 } probe_softc; 5321 5322 static void 5323 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5324 cam_flags flags, union ccb *request_ccb) 5325 { 5326 struct ccb_pathinq cpi; 5327 cam_status status; 5328 struct cam_path *new_path; 5329 struct cam_periph *old_periph; 5330 int s; 5331 5332 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5333 ("xpt_scan_lun\n")); 5334 5335 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5336 cpi.ccb_h.func_code = XPT_PATH_INQ; 5337 xpt_action((union ccb *)&cpi); 5338 5339 if (cpi.ccb_h.status != CAM_REQ_CMP) { 5340 if (request_ccb != NULL) { 5341 request_ccb->ccb_h.status = cpi.ccb_h.status; 5342 xpt_done(request_ccb); 5343 } 5344 return; 5345 } 5346 5347 if ((cpi.hba_misc & 
PIM_NOINITIATOR) != 0) { 5348 /* 5349 * Can't scan the bus on an adapter that 5350 * cannot perform the initiator role. 5351 */ 5352 if (request_ccb != NULL) { 5353 request_ccb->ccb_h.status = CAM_REQ_CMP; 5354 xpt_done(request_ccb); 5355 } 5356 return; 5357 } 5358 5359 if (request_ccb == NULL) { 5360 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5361 if (request_ccb == NULL) { 5362 xpt_print_path(path); 5363 printf("xpt_scan_lun: can't allocate CCB, can't " 5364 "continue\n"); 5365 return; 5366 } 5367 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5368 if (new_path == NULL) { 5369 xpt_print_path(path); 5370 printf("xpt_scan_lun: can't allocate path, can't " 5371 "continue\n"); 5372 free(request_ccb, M_TEMP); 5373 return; 5374 } 5375 status = xpt_compile_path(new_path, xpt_periph, 5376 path->bus->path_id, 5377 path->target->target_id, 5378 path->device->lun_id); 5379 5380 if (status != CAM_REQ_CMP) { 5381 xpt_print_path(path); 5382 printf("xpt_scan_lun: can't compile path, can't " 5383 "continue\n"); 5384 free(request_ccb, M_TEMP); 5385 free(new_path, M_TEMP); 5386 return; 5387 } 5388 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5389 request_ccb->ccb_h.cbfcnp = xptscandone; 5390 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5391 request_ccb->crcn.flags = flags; 5392 } 5393 5394 s = splsoftcam(); 5395 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5396 probe_softc *softc; 5397 5398 softc = (probe_softc *)old_periph->softc; 5399 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5400 periph_links.tqe); 5401 } else { 5402 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5403 probestart, "probe", 5404 CAM_PERIPH_BIO, 5405 request_ccb->ccb_h.path, NULL, 0, 5406 request_ccb); 5407 5408 if (status != CAM_REQ_CMP) { 5409 xpt_print_path(path); 5410 printf("xpt_scan_lun: cam_alloc_periph returned an " 5411 "error, can't continue probe\n"); 5412 request_ccb->ccb_h.status = status; 5413 xpt_done(request_ccb); 5414 } 5415 } 5416 splx(s); 5417 } 5418 5419 static void 5420 xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5421 { 5422 xpt_release_path(done_ccb->ccb_h.path); 5423 free(done_ccb->ccb_h.path, M_TEMP); 5424 free(done_ccb, M_TEMP); 5425 } 5426 5427 static cam_status 5428 proberegister(struct cam_periph *periph, void *arg) 5429 { 5430 union ccb *request_ccb; /* CCB representing the probe request */ 5431 probe_softc *softc; 5432 5433 request_ccb = (union ccb *)arg; 5434 if (periph == NULL) { 5435 printf("proberegister: periph was NULL!!\n"); 5436 return(CAM_REQ_CMP_ERR); 5437 } 5438 5439 if (request_ccb == NULL) { 5440 printf("proberegister: no probe CCB, " 5441 "can't register device\n"); 5442 return(CAM_REQ_CMP_ERR); 5443 } 5444 5445 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5446 5447 if (softc == NULL) { 5448 printf("proberegister: Unable to probe new device. " 5449 "Unable to allocate softc\n"); 5450 return(CAM_REQ_CMP_ERR); 5451 } 5452 TAILQ_INIT(&softc->request_ccbs); 5453 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5454 periph_links.tqe); 5455 softc->flags = 0; 5456 periph->softc = softc; 5457 cam_periph_acquire(periph); 5458 /* 5459 * Ensure we've waited at least a bus settle 5460 * delay before attempting to probe the device. 5461 * For HBAs that don't do bus resets, this won't make a difference. 
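	 *
	 * The settle time is the kernel's SCSI_DELAY option, expressed
	 * in milliseconds; a kernel config can shorten it with, e.g.,
	 *
	 *	options	SCSI_DELAY=5000
	 *
	 * and the cam_periph_freeze_after_event() call below honors it
	 * relative to the bus's last recorded reset.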
5462 */
5463 	cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5464 				      SCSI_DELAY);
5465 	probeschedule(periph);
5466 	return(CAM_REQ_CMP);
5467 }
5468 
5469 static void
5470 probeschedule(struct cam_periph *periph)
5471 {
5472 	struct ccb_pathinq cpi;
5473 	union ccb *ccb;
5474 	probe_softc *softc;
5475 
5476 	softc = (probe_softc *)periph->softc;
5477 	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5478 
5479 	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5480 	cpi.ccb_h.func_code = XPT_PATH_INQ;
5481 	xpt_action((union ccb *)&cpi);
5482 
5483 	/*
5484 	 * If a device has gone away and another device, or the same one,
5485 	 * is back in the same place, it should have a unit attention
5486 	 * condition pending.  It will not report the unit attention in
5487 	 * response to an inquiry, which may leave invalid transfer
5488 	 * negotiations in effect.  The TUR will reveal the unit attention
5489 	 * condition.  Only send the TUR for lun 0, since some devices
5490 	 * will get confused by commands other than inquiry to non-existent
5491 	 * luns.  If you think a device has gone away, start your scan from
5492 	 * lun 0.  This will ensure that any bogus transfer settings are
5493 	 * invalidated.
5494 	 *
5495 	 * If we haven't seen the device before and the controller supports
5496 	 * some kind of transfer negotiation, negotiate with the first
5497 	 * sent command if no bus reset was performed at startup.  This
5498 	 * ensures that the device is not confused by transfer negotiation
5499 	 * settings left over by loader or BIOS action.
5500 	 */
5501 	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5502 	 && (ccb->ccb_h.target_lun == 0)) {
5503 		softc->action = PROBE_TUR;
5504 	} else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5505 	      && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5506 		proberequestdefaultnegotiation(periph);
5507 		softc->action = PROBE_INQUIRY;
5508 	} else {
5509 		softc->action = PROBE_INQUIRY;
5510 	}
5511 
5512 	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5513 		softc->flags |= PROBE_NO_ANNOUNCE;
5514 	else
5515 		softc->flags &= ~PROBE_NO_ANNOUNCE;
5516 
5517 	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5518 }
5519 
5520 static void
5521 probestart(struct cam_periph *periph, union ccb *start_ccb)
5522 {
5523 	/* Probe the device that our peripheral driver points to */
5524 	struct ccb_scsiio *csio;
5525 	probe_softc *softc;
5526 
5527 	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5528 
5529 	softc = (probe_softc *)periph->softc;
5530 	csio = &start_ccb->csio;
5531 
5532 	switch (softc->action) {
5533 	case PROBE_TUR:
5534 	case PROBE_TUR_FOR_NEGOTIATION:
5535 	{
5536 		scsi_test_unit_ready(csio,
5537 				     /*retries*/4,
5538 				     probedone,
5539 				     MSG_SIMPLE_Q_TAG,
5540 				     SSD_FULL_SIZE,
5541 				     /*timeout*/60000);
5542 		break;
5543 	}
5544 	case PROBE_INQUIRY:
5545 	case PROBE_FULL_INQUIRY:
5546 	{
5547 		u_int inquiry_len;
5548 		struct scsi_inquiry_data *inq_buf;
5549 
5550 		inq_buf = &periph->path->device->inq_data;
5551 		/*
5552 		 * If the device is currently configured, we calculate an
5553 		 * MD5 checksum of the inquiry data, and if the serial number
5554 		 * length is greater than 0, add the serial number data
5555 		 * into the checksum as well.  Once the inquiry and the
5556 		 * serial number check finish, we attempt to figure out
5557 		 * whether we still have the same device.
5558 */ 5559 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 5560 5561 MD5Init(&softc->context); 5562 MD5Update(&softc->context, (unsigned char *)inq_buf, 5563 sizeof(struct scsi_inquiry_data)); 5564 softc->flags |= PROBE_INQUIRY_CKSUM; 5565 if (periph->path->device->serial_num_len > 0) { 5566 MD5Update(&softc->context, 5567 periph->path->device->serial_num, 5568 periph->path->device->serial_num_len); 5569 softc->flags |= PROBE_SERIAL_CKSUM; 5570 } 5571 MD5Final(softc->digest, &softc->context); 5572 } 5573 5574 if (softc->action == PROBE_INQUIRY) 5575 inquiry_len = SHORT_INQUIRY_LENGTH; 5576 else 5577 inquiry_len = inq_buf->additional_length + 4; 5578 5579 scsi_inquiry(csio, 5580 /*retries*/4, 5581 probedone, 5582 MSG_SIMPLE_Q_TAG, 5583 (u_int8_t *)inq_buf, 5584 inquiry_len, 5585 /*evpd*/FALSE, 5586 /*page_code*/0, 5587 SSD_MIN_SIZE, 5588 /*timeout*/60 * 1000); 5589 break; 5590 } 5591 case PROBE_MODE_SENSE: 5592 { 5593 void *mode_buf; 5594 int mode_buf_len; 5595 5596 mode_buf_len = sizeof(struct scsi_mode_header_6) 5597 + sizeof(struct scsi_mode_blk_desc) 5598 + sizeof(struct scsi_control_page); 5599 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT); 5600 if (mode_buf != NULL) { 5601 scsi_mode_sense(csio, 5602 /*retries*/4, 5603 probedone, 5604 MSG_SIMPLE_Q_TAG, 5605 /*dbd*/FALSE, 5606 SMS_PAGE_CTRL_CURRENT, 5607 SMS_CONTROL_MODE_PAGE, 5608 mode_buf, 5609 mode_buf_len, 5610 SSD_FULL_SIZE, 5611 /*timeout*/60000); 5612 break; 5613 } 5614 xpt_print_path(periph->path); 5615 printf("Unable to mode sense control page - malloc failure\n"); 5616 softc->action = PROBE_SERIAL_NUM; 5617 /* FALLTHROUGH */ 5618 } 5619 case PROBE_SERIAL_NUM: 5620 { 5621 struct scsi_vpd_unit_serial_number *serial_buf; 5622 struct cam_ed* device; 5623 5624 serial_buf = NULL; 5625 device = periph->path->device; 5626 device->serial_num = NULL; 5627 device->serial_num_len = 0; 5628 5629 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) 5630 serial_buf = (struct scsi_vpd_unit_serial_number *) 5631 malloc(sizeof(*serial_buf), M_TEMP, 5632 M_NOWAIT | M_ZERO); 5633 5634 if (serial_buf != NULL) { 5635 scsi_inquiry(csio, 5636 /*retries*/4, 5637 probedone, 5638 MSG_SIMPLE_Q_TAG, 5639 (u_int8_t *)serial_buf, 5640 sizeof(*serial_buf), 5641 /*evpd*/TRUE, 5642 SVPD_UNIT_SERIAL_NUMBER, 5643 SSD_MIN_SIZE, 5644 /*timeout*/60 * 1000); 5645 break; 5646 } 5647 /* 5648 * We'll have to do without, let our probedone 5649 * routine finish up for us. 
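		 *
		 * (probedone recognizes this case by the NULL data_ptr
		 * set below and skips serial number processing, leaving
		 * the device with serial_num_len == 0.)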
5650 */ 5651 start_ccb->csio.data_ptr = NULL; 5652 probedone(periph, start_ccb); 5653 return; 5654 } 5655 } 5656 xpt_action(start_ccb); 5657 } 5658 5659 static void 5660 proberequestdefaultnegotiation(struct cam_periph *periph) 5661 { 5662 struct ccb_trans_settings cts; 5663 5664 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 5665 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5666 #ifdef CAM_NEW_TRAN_CODE 5667 cts.type = CTS_TYPE_USER_SETTINGS; 5668 #else /* CAM_NEW_TRAN_CODE */ 5669 cts.flags = CCB_TRANS_USER_SETTINGS; 5670 #endif /* CAM_NEW_TRAN_CODE */ 5671 xpt_action((union ccb *)&cts); 5672 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 5673 #ifdef CAM_NEW_TRAN_CODE 5674 cts.type = CTS_TYPE_CURRENT_SETTINGS; 5675 #else /* CAM_NEW_TRAN_CODE */ 5676 cts.flags &= ~CCB_TRANS_USER_SETTINGS; 5677 cts.flags |= CCB_TRANS_CURRENT_SETTINGS; 5678 #endif /* CAM_NEW_TRAN_CODE */ 5679 xpt_action((union ccb *)&cts); 5680 } 5681 5682 static void 5683 probedone(struct cam_periph *periph, union ccb *done_ccb) 5684 { 5685 probe_softc *softc; 5686 struct cam_path *path; 5687 u_int32_t priority; 5688 5689 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 5690 5691 softc = (probe_softc *)periph->softc; 5692 path = done_ccb->ccb_h.path; 5693 priority = done_ccb->ccb_h.pinfo.priority; 5694 5695 switch (softc->action) { 5696 case PROBE_TUR: 5697 { 5698 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5699 5700 if (cam_periph_error(done_ccb, 0, 5701 SF_NO_PRINT, NULL) == ERESTART) 5702 return; 5703 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5704 /* Don't wedge the queue */ 5705 xpt_release_devq(done_ccb->ccb_h.path, 5706 /*count*/1, 5707 /*run_queue*/TRUE); 5708 } 5709 softc->action = PROBE_INQUIRY; 5710 xpt_release_ccb(done_ccb); 5711 xpt_schedule(periph, priority); 5712 return; 5713 } 5714 case PROBE_INQUIRY: 5715 case PROBE_FULL_INQUIRY: 5716 { 5717 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5718 struct scsi_inquiry_data *inq_buf; 5719 u_int8_t periph_qual; 5720 u_int8_t periph_dtype; 5721 5722 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 5723 inq_buf = &path->device->inq_data; 5724 5725 periph_qual = SID_QUAL(inq_buf); 5726 periph_dtype = SID_TYPE(inq_buf); 5727 5728 if (periph_dtype != T_NODEVICE) { 5729 switch(periph_qual) { 5730 case SID_QUAL_LU_CONNECTED: 5731 { 5732 u_int8_t alen; 5733 5734 /* 5735 * We conservatively request only 5736 * SHORT_INQUIRY_LEN bytes of inquiry 5737 * information during our first try 5738 * at sending an INQUIRY. If the device 5739 * has more information to give, 5740 * perform a second request specifying 5741 * the amount of information the device 5742 * is willing to give. 
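				 *
				 * For example, a device whose additional_length
				 * implies more than SHORT_INQUIRY_LENGTH bytes
				 * in all (additional_length + 4) trips the test
				 * below and is sent a second INQUIRY for the
				 * full amount it advertised.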
5743 */ 5744 alen = inq_buf->additional_length; 5745 if (softc->action == PROBE_INQUIRY 5746 && alen > (SHORT_INQUIRY_LENGTH - 4)) { 5747 softc->action = 5748 PROBE_FULL_INQUIRY; 5749 xpt_release_ccb(done_ccb); 5750 xpt_schedule(periph, priority); 5751 return; 5752 } 5753 5754 xpt_find_quirk(path->device); 5755 5756 #ifdef CAM_NEW_TRAN_CODE 5757 xpt_devise_transport(path); 5758 #endif /* CAM_NEW_TRAN_CODE */ 5759 if ((inq_buf->flags & SID_CmdQue) != 0) 5760 softc->action = 5761 PROBE_MODE_SENSE; 5762 else 5763 softc->action = 5764 PROBE_SERIAL_NUM; 5765 5766 path->device->flags &= 5767 ~CAM_DEV_UNCONFIGURED; 5768 5769 xpt_release_ccb(done_ccb); 5770 xpt_schedule(periph, priority); 5771 return; 5772 } 5773 default: 5774 break; 5775 } 5776 } 5777 } else if (cam_periph_error(done_ccb, 0, 5778 done_ccb->ccb_h.target_lun > 0 5779 ? SF_RETRY_UA|SF_QUIET_IR 5780 : SF_RETRY_UA, 5781 &softc->saved_ccb) == ERESTART) { 5782 return; 5783 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5784 /* Don't wedge the queue */ 5785 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5786 /*run_queue*/TRUE); 5787 } 5788 /* 5789 * If we get to this point, we got an error status back 5790 * from the inquiry and the error status doesn't require 5791 * automatically retrying the command. Therefore, the 5792 * inquiry failed. If we had inquiry information before 5793 * for this device, but this latest inquiry command failed, 5794 * the device has probably gone away. If this device isn't 5795 * already marked unconfigured, notify the peripheral 5796 * drivers that this device is no more. 5797 */ 5798 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5799 /* Send the async notification. */ 5800 xpt_async(AC_LOST_DEVICE, path, NULL); 5801 5802 xpt_release_ccb(done_ccb); 5803 break; 5804 } 5805 case PROBE_MODE_SENSE: 5806 { 5807 struct ccb_scsiio *csio; 5808 struct scsi_mode_header_6 *mode_hdr; 5809 5810 csio = &done_ccb->csio; 5811 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 5812 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5813 struct scsi_control_page *page; 5814 u_int8_t *offset; 5815 5816 offset = ((u_int8_t *)&mode_hdr[1]) 5817 + mode_hdr->blk_desc_len; 5818 page = (struct scsi_control_page *)offset; 5819 path->device->queue_flags = page->queue_flags; 5820 } else if (cam_periph_error(done_ccb, 0, 5821 SF_RETRY_UA|SF_NO_PRINT, 5822 &softc->saved_ccb) == ERESTART) { 5823 return; 5824 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5825 /* Don't wedge the queue */ 5826 xpt_release_devq(done_ccb->ccb_h.path, 5827 /*count*/1, /*run_queue*/TRUE); 5828 } 5829 xpt_release_ccb(done_ccb); 5830 free(mode_hdr, M_TEMP); 5831 softc->action = PROBE_SERIAL_NUM; 5832 xpt_schedule(periph, priority); 5833 return; 5834 } 5835 case PROBE_SERIAL_NUM: 5836 { 5837 struct ccb_scsiio *csio; 5838 struct scsi_vpd_unit_serial_number *serial_buf; 5839 u_int32_t priority; 5840 int changed; 5841 int have_serialnum; 5842 5843 changed = 1; 5844 have_serialnum = 0; 5845 csio = &done_ccb->csio; 5846 priority = done_ccb->ccb_h.pinfo.priority; 5847 serial_buf = 5848 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 5849 5850 /* Clean up from previous instance of this device */ 5851 if (path->device->serial_num != NULL) { 5852 free(path->device->serial_num, M_DEVBUF); 5853 path->device->serial_num = NULL; 5854 path->device->serial_num_len = 0; 5855 } 5856 5857 if (serial_buf == NULL) { 5858 /* 5859 * Don't process the command as it was never sent 5860 */ 5861 } else if ((csio->ccb_h.status & 
CAM_STATUS_MASK) == CAM_REQ_CMP
5862 			&& (serial_buf->length > 0)) {
5863 
5864 			have_serialnum = 1;
5865 			path->device->serial_num =
5866 				(u_int8_t *)malloc((serial_buf->length + 1),
5867 						   M_DEVBUF, M_NOWAIT);
5868 			if (path->device->serial_num != NULL) {
5869 				bcopy(serial_buf->serial_num,
5870 				      path->device->serial_num,
5871 				      serial_buf->length);
5872 				path->device->serial_num_len =
5873 				    serial_buf->length;
5874 				path->device->serial_num[serial_buf->length]
5875 				    = '\0';
5876 			}
5877 		} else if (cam_periph_error(done_ccb, 0,
5878 					    SF_RETRY_UA|SF_NO_PRINT,
5879 					    &softc->saved_ccb) == ERESTART) {
5880 			return;
5881 		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5882 			/* Don't wedge the queue */
5883 			xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
5884 					 /*run_queue*/TRUE);
5885 		}
5886 
5887 		/*
5888 		 * Let's see if we have seen this device before.
5889 		 */
5890 		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
5891 			MD5_CTX context;
5892 			u_int8_t digest[16];
5893 
5894 			MD5Init(&context);
5895 
5896 			MD5Update(&context,
5897 				  (unsigned char *)&path->device->inq_data,
5898 				  sizeof(struct scsi_inquiry_data));
5899 
5900 			if (have_serialnum)
5901 				MD5Update(&context, serial_buf->serial_num,
5902 					  serial_buf->length);
5903 
5904 			MD5Final(digest, &context);
5905 			if (bcmp(softc->digest, digest, 16) == 0)
5906 				changed = 0;
5907 
5908 			/*
5909 			 * XXX Do we need to do a TUR in order to ensure
5910 			 * that the device really hasn't changed???
5911 			 */
5912 			if ((changed != 0)
5913 			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
5914 				xpt_async(AC_LOST_DEVICE, path, NULL);
5915 		}
5916 		if (serial_buf != NULL)
5917 			free(serial_buf, M_TEMP);
5918 
5919 		if (changed != 0) {
5920 			/*
5921 			 * Now that we have all the necessary
5922 			 * information to safely perform transfer
5923 			 * negotiations... Controllers don't perform
5924 			 * any negotiation or tagged queuing until
5925 			 * after the first XPT_SET_TRAN_SETTINGS ccb is
5926 			 * received.  So, on a new device, just retrieve
5927 			 * the user settings, and set them as the current
5928 			 * settings to set the device up.
5929 			 */
5930 			proberequestdefaultnegotiation(periph);
5931 			xpt_release_ccb(done_ccb);
5932 
5933 			/*
5934 			 * Perform a TUR to allow the controller to
5935 			 * perform any necessary transfer negotiation.
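			 *
			 * The TUR itself is issued from probestart() once
			 * the PROBE_TUR_FOR_NEGOTIATION action set below
			 * is scheduled; its completion is handled by the
			 * matching case at the end of this switch.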
5936 */ 5937 softc->action = PROBE_TUR_FOR_NEGOTIATION; 5938 xpt_schedule(periph, priority); 5939 return; 5940 } 5941 xpt_release_ccb(done_ccb); 5942 break; 5943 } 5944 case PROBE_TUR_FOR_NEGOTIATION: 5945 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5946 /* Don't wedge the queue */ 5947 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5948 /*run_queue*/TRUE); 5949 } 5950 5951 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 5952 5953 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 5954 /* Inform the XPT that a new device has been found */ 5955 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5956 xpt_action(done_ccb); 5957 5958 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb); 5959 } 5960 xpt_release_ccb(done_ccb); 5961 break; 5962 } 5963 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5964 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); 5965 done_ccb->ccb_h.status = CAM_REQ_CMP; 5966 xpt_done(done_ccb); 5967 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { 5968 cam_periph_invalidate(periph); 5969 cam_periph_release(periph); 5970 } else { 5971 probeschedule(periph); 5972 } 5973 } 5974 5975 static void 5976 probecleanup(struct cam_periph *periph) 5977 { 5978 free(periph->softc, M_TEMP); 5979 } 5980 5981 static void 5982 xpt_find_quirk(struct cam_ed *device) 5983 { 5984 caddr_t match; 5985 5986 match = cam_quirkmatch((caddr_t)&device->inq_data, 5987 (caddr_t)xpt_quirk_table, 5988 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table), 5989 sizeof(*xpt_quirk_table), scsi_inquiry_match); 5990 5991 if (match == NULL) 5992 panic("xpt_find_quirk: device didn't match wildcard entry!!"); 5993 5994 device->quirk = (struct xpt_quirk_entry *)match; 5995 } 5996 5997 #ifdef CAM_NEW_TRAN_CODE 5998 5999 static void 6000 xpt_devise_transport(struct cam_path *path) 6001 { 6002 struct ccb_pathinq cpi; 6003 struct ccb_trans_settings cts; 6004 struct scsi_inquiry_data *inq_buf; 6005 6006 /* Get transport information from the SIM */ 6007 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 6008 cpi.ccb_h.func_code = XPT_PATH_INQ; 6009 xpt_action((union ccb *)&cpi); 6010 6011 inq_buf = NULL; 6012 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0) 6013 inq_buf = &path->device->inq_data; 6014 path->device->protocol = PROTO_SCSI; 6015 path->device->protocol_version = 6016 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version; 6017 path->device->transport = cpi.transport; 6018 path->device->transport_version = cpi.transport_version; 6019 6020 /* 6021 * Any device not using SPI3 features should 6022 * be considered SPI2 or lower. 6023 */ 6024 if (inq_buf != NULL) { 6025 if (path->device->transport == XPORT_SPI 6026 && (inq_buf->spi3data & SID_SPI_MASK) == 0 6027 && path->device->transport_version > 2) 6028 path->device->transport_version = 2; 6029 } else { 6030 struct cam_ed* otherdev; 6031 6032 for (otherdev = TAILQ_FIRST(&path->target->ed_entries); 6033 otherdev != NULL; 6034 otherdev = TAILQ_NEXT(otherdev, links)) { 6035 if (otherdev != path->device) 6036 break; 6037 } 6038 6039 if (otherdev != NULL) { 6040 /* 6041 * Initially assume the same versioning as 6042 * prior luns for this target. 
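			 *
			 * For example, if lun 0 of this target already
			 * settled on SCSI-2 over SPI revision 2, a newly
			 * created lun starts from those same revisions
			 * rather than from the SIM-reported defaults.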
6043 			 */
6044 			path->device->protocol_version =
6045 			    otherdev->protocol_version;
6046 			path->device->transport_version =
6047 			    otherdev->transport_version;
6048 		} else {
6049 			/* Until we know better, opt for safety */
6050 			path->device->protocol_version = 2;
6051 			if (path->device->transport == XPORT_SPI)
6052 				path->device->transport_version = 2;
6053 			else
6054 				path->device->transport_version = 0;
6055 		}
6056 	}
6057 
6058 	/*
6059 	 * XXX
6060 	 * For a device compliant with SPC-2 we should be able
6061 	 * to determine the transport version supported by
6062 	 * scrutinizing the version descriptors in the
6063 	 * inquiry buffer.
6064 	 */
6065 
6066 	/* Tell the controller what we think */
6067 	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6068 	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6069 	cts.type = CTS_TYPE_CURRENT_SETTINGS;
6070 	cts.transport = path->device->transport;
6071 	cts.transport_version = path->device->transport_version;
6072 	cts.protocol = path->device->protocol;
6073 	cts.protocol_version = path->device->protocol_version;
6074 	cts.proto_specific.valid = 0;
6075 	cts.xport_specific.valid = 0;
6076 	xpt_action((union ccb *)&cts);
6077 }
6078 
6079 static void
6080 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6081 			  int async_update)
6082 {
6083 	struct ccb_pathinq cpi;
6084 	struct ccb_trans_settings cur_cts;
6085 	struct ccb_trans_settings_scsi *scsi;
6086 	struct ccb_trans_settings_scsi *cur_scsi;
6087 	struct cam_sim *sim;
6088 	struct scsi_inquiry_data *inq_data;
6089 
6090 	if (device == NULL) {
6091 		cts->ccb_h.status = CAM_PATH_INVALID;
6092 		xpt_done((union ccb *)cts);
6093 		return;
6094 	}
6095 
6096 	if (cts->protocol == PROTO_UNKNOWN
6097 	 || cts->protocol == PROTO_UNSPECIFIED) {
6098 		cts->protocol = device->protocol;
6099 		cts->protocol_version = device->protocol_version;
6100 	}
6101 
6102 	if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6103 	 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6104 		cts->protocol_version = device->protocol_version;
6105 
6106 	if (cts->protocol != device->protocol) {
6107 		xpt_print_path(cts->ccb_h.path);
6108 		printf("Uninitialized Protocol %x:%x?\n",
6109 		       cts->protocol, device->protocol);
6110 		cts->protocol = device->protocol;
6111 	}
6112 
6113 	if (cts->protocol_version > device->protocol_version) {
6114 		if (bootverbose) {
6115 			xpt_print_path(cts->ccb_h.path);
6116 			printf("Down revving Protocol Version from %d to %d?\n",
6117 			       cts->protocol_version, device->protocol_version);
6118 		}
6119 		cts->protocol_version = device->protocol_version;
6120 	}
6121 
6122 	if (cts->transport == XPORT_UNKNOWN
6123 	 || cts->transport == XPORT_UNSPECIFIED) {
6124 		cts->transport = device->transport;
6125 		cts->transport_version = device->transport_version;
6126 	}
6127 
6128 	if (cts->transport_version == XPORT_VERSION_UNKNOWN
6129 	 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6130 		cts->transport_version = device->transport_version;
6131 
6132 	if (cts->transport != device->transport) {
6133 		xpt_print_path(cts->ccb_h.path);
6134 		printf("Uninitialized Transport %x:%x?\n",
6135 		       cts->transport, device->transport);
6136 		cts->transport = device->transport;
6137 	}
6138 
6139 	if (cts->transport_version > device->transport_version) {
6140 		if (bootverbose) {
6141 			xpt_print_path(cts->ccb_h.path);
6142 			printf("Down revving Transport Version from %d to %d?\n",
6143 			       cts->transport_version,
6144 			       device->transport_version);
6145 		}
6146 		cts->transport_version = device->transport_version;
6147 	}
6148 
6149 	sim = cts->ccb_h.path->bus->sim;
6150 
6151 	/*
6152 	 * Nothing more of interest to do unless
6153 	 * this is a device connected via the
6154 	 * SCSI protocol.
6155 	 */
6156 	if (cts->protocol != PROTO_SCSI) {
6157 		if (async_update == FALSE)
6158 			(*(sim->sim_action))(sim, (union ccb *)cts);
6159 		return;
6160 	}
6161 
6162 	inq_data = &device->inq_data;
6163 	scsi = &cts->proto_specific.scsi;
6164 	xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6165 	cpi.ccb_h.func_code = XPT_PATH_INQ;
6166 	xpt_action((union ccb *)&cpi);
6167 
6168 	/* SCSI specific sanity checking */
6169 	if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6170 	 || (inq_data->flags & SID_CmdQue) == 0
6171 	 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6172 	 || (device->quirk->mintags == 0)) {
6173 		/*
6174 		 * Can't tag on hardware that doesn't support tags,
6175 		 * doesn't have it enabled, or has broken tag support.
6176 		 */
6177 		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6178 	}
6179 
6180 	if (async_update == FALSE) {
6181 		/*
6182 		 * Perform sanity checking against what the
6183 		 * controller and device can do.
6184 		 */
6185 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6186 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6187 		cur_cts.type = cts->type;
6188 		xpt_action((union ccb *)&cur_cts);
6189 
6190 		cur_scsi = &cur_cts.proto_specific.scsi;
6191 		if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6192 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6193 			scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6194 		}
6195 		if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6196 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6197 	}
6198 
6199 	/* SPI specific sanity checking */
6200 	if (cts->transport == XPORT_SPI && async_update == FALSE) {
6201 		u_int spi3caps;
6202 		struct ccb_trans_settings_spi *spi;
6203 		struct ccb_trans_settings_spi *cur_spi;
6204 
6205 		spi = &cts->xport_specific.spi;
6206 
6207 		cur_spi = &cur_cts.xport_specific.spi;
6208 
6209 		/* Fill in any gaps in what the user gave us */
6210 		if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6211 			spi->sync_period = cur_spi->sync_period;
6212 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6213 			spi->sync_period = 0;
6214 		if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6215 			spi->sync_offset = cur_spi->sync_offset;
6216 		if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6217 			spi->sync_offset = 0;
6218 		if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6219 			spi->ppr_options = cur_spi->ppr_options;
6220 		if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6221 			spi->ppr_options = 0;
6222 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6223 			spi->bus_width = cur_spi->bus_width;
6224 		if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6225 			spi->bus_width = 0;
6226 		if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6227 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6228 			spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6229 		}
6230 		if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6231 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6232 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6233 		  && (inq_data->flags & SID_Sync) == 0
6234 		  && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6235 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6236 		 || (cur_spi->sync_offset == 0)
6237 		 || (cur_spi->sync_period == 0)) {
6238 			/* Force async */
6239 			spi->sync_period = 0;
6240 			spi->sync_offset = 0;
6241 		}
6242 
6243 		switch (spi->bus_width) {
6244 		case MSG_EXT_WDTR_BUS_32_BIT:
6245 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6246 			   || (inq_data->flags & SID_WBus32) != 0
6247 			   || cts->type == CTS_TYPE_USER_SETTINGS)
6248 			  && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6249 				break;
6250 			/* Fall Through to 16-bit */
6251 		case MSG_EXT_WDTR_BUS_16_BIT:
6252 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6253 			   || (inq_data->flags & SID_WBus16) != 0
6254 			   || cts->type == CTS_TYPE_USER_SETTINGS)
6255 			  && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6256 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6257 				break;
6258 			}
6259 			/* Fall Through to 8-bit */
6260 		default: /* New bus width?? */
6261 		case MSG_EXT_WDTR_BUS_8_BIT:
6262 			/* All targets can do this */
6263 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6264 			break;
6265 		}
6266 
6267 		spi3caps = cpi.xport_specific.spi.ppr_options;
6268 		if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6269 		 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6270 			spi3caps &= inq_data->spi3data;
6271 
6272 		if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6273 			spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6274 
6275 		if ((spi3caps & SID_SPI_IUS) == 0)
6276 			spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6277 
6278 		if ((spi3caps & SID_SPI_QAS) == 0)
6279 			spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6280 
6281 		/* No PPR options are allowed unless we are wide */
6282 		if (spi->bus_width == 0)
6283 			spi->ppr_options = 0;
6284 
6285 		if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6286 			/*
6287 			 * Can't tag queue without disconnection.
6288 			 */
6289 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6290 			scsi->valid |= CTS_SCSI_VALID_TQ;
6291 		}
6292 
6293 		/*
6294 		 * If we are currently performing tagged transactions to
6295 		 * this device and want to change its negotiation parameters,
6296 		 * go non-tagged for a bit to give the controller a chance to
6297 		 * negotiate unhampered by tag messages.
6298 		 */
6299 		if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6300 		 && (device->inq_flags & SID_CmdQue) != 0
6301 		 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6302 		 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6303 				   CTS_SPI_VALID_SYNC_OFFSET|
6304 				   CTS_SPI_VALID_BUS_WIDTH)) != 0)
6305 			xpt_toggle_tags(cts->ccb_h.path);
6306 	}
6307 
6308 	if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6309 	 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6310 		int device_tagenb;
6311 
6312 		/*
6313 		 * If we are transitioning from tags to no-tags or
6314 		 * vice-versa, we need to carefully freeze and restart
6315 		 * the queue so that we don't overlap tagged and non-tagged
6316 		 * commands.  We also temporarily stop tags if there is
6317 		 * a change in transfer negotiation settings to allow
6318 		 * "tag-less" negotiation.
6319 		 */
6320 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6321 		 || (device->inq_flags & SID_CmdQue) != 0)
6322 			device_tagenb = TRUE;
6323 		else
6324 			device_tagenb = FALSE;
6325 
6326 		if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6327 		   && device_tagenb == FALSE)
6328 		 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6329 		   && device_tagenb == TRUE)) {
6330 
6331 			if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6332 				/*
6333 				 * Delay change to use tags until after a
6334 				 * few commands have gone to this device so
6335 				 * the controller has time to perform transfer
6336 				 * negotiations without tagged messages getting
6337 				 * in the way.
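				 *
				 * (camisr() decrements tag_delay_count as
				 * untagged commands complete and calls
				 * xpt_start_tags() once the count reaches
				 * zero.)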
6338 				 */
6339 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6340 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6341 			} else {
6342 				struct ccb_relsim crs;
6343 
6344 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6345 				device->inq_flags &= ~SID_CmdQue;
6346 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6347 						    sim->max_dev_openings);
6348 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6349 				device->tag_delay_count = 0;
6350 
6351 				xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6352 					      /*priority*/1);
6353 				crs.ccb_h.func_code = XPT_REL_SIMQ;
6354 				crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6355 				crs.openings
6356 				    = crs.release_timeout
6357 				    = crs.qfrozen_cnt
6358 				    = 0;
6359 				xpt_action((union ccb *)&crs);
6360 			}
6361 		}
6362 	}
6363 	if (async_update == FALSE)
6364 		(*(sim->sim_action))(sim, (union ccb *)cts);
6365 }
6366 
6367 #else /* CAM_NEW_TRAN_CODE */
6368 
6369 static void
6370 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6371 			  int async_update)
6372 {
6373 	struct cam_sim *sim;
6374 	int qfrozen;
6375 
6376 	sim = cts->ccb_h.path->bus->sim;
6377 	if (async_update == FALSE) {
6378 		struct scsi_inquiry_data *inq_data;
6379 		struct ccb_pathinq cpi;
6380 		struct ccb_trans_settings cur_cts;
6381 
6382 		if (device == NULL) {
6383 			cts->ccb_h.status = CAM_PATH_INVALID;
6384 			xpt_done((union ccb *)cts);
6385 			return;
6386 		}
6387 
6388 		/*
6389 		 * Perform sanity checking against what the
6390 		 * controller and device can do.
6391 		 */
6392 		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6393 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6394 		xpt_action((union ccb *)&cpi);
6395 		xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6396 		cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6397 		cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS;
6398 		xpt_action((union ccb *)&cur_cts);
6399 		inq_data = &device->inq_data;
6400 
6401 		/* Fill in any gaps in what the user gave us */
6402 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
6403 			cts->sync_period = cur_cts.sync_period;
6404 		if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
6405 			cts->sync_offset = cur_cts.sync_offset;
6406 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0)
6407 			cts->bus_width = cur_cts.bus_width;
6408 		if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) {
6409 			cts->flags &= ~CCB_TRANS_DISC_ENB;
6410 			cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB;
6411 		}
6412 		if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) {
6413 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6414 			cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB;
6415 		}
6416 
6417 		if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6418 		  && (inq_data->flags & SID_Sync) == 0)
6419 		 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6420 		 || (cts->sync_offset == 0)
6421 		 || (cts->sync_period == 0)) {
6422 			/* Force async */
6423 			cts->sync_period = 0;
6424 			cts->sync_offset = 0;
6425 		} else if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6426 			&& (inq_data->spi3data & SID_SPI_CLOCK_DT) == 0
6427 			&& cts->sync_period <= 0x9) {
6428 			/*
6429 			 * Don't allow DT transmission rates if the
6430 			 * device does not support it.
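			 *
			 * (A sync period factor of 0x9 is the 12.5ns
			 * DT-only rate; 0xa, 25ns, appears to be the
			 * fastest single-transition rate, hence the
			 * clamp below.)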
6431 			 */
6432 			cts->sync_period = 0xa;
6433 		}
6434 
6435 		switch (cts->bus_width) {
6436 		case MSG_EXT_WDTR_BUS_32_BIT:
6437 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6438 			   || (inq_data->flags & SID_WBus32) != 0)
6439 			  && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6440 				break;
6441 			/* Fall Through to 16-bit */
6442 		case MSG_EXT_WDTR_BUS_16_BIT:
6443 			if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6444 			   || (inq_data->flags & SID_WBus16) != 0)
6445 			  && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6446 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6447 				break;
6448 			}
6449 			/* Fall Through to 8-bit */
6450 		default: /* New bus width?? */
6451 		case MSG_EXT_WDTR_BUS_8_BIT:
6452 			/* All targets can do this */
6453 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6454 			break;
6455 		}
6456 
6457 		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
6458 			/*
6459 			 * Can't tag queue without disconnection.
6460 			 */
6461 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6462 			cts->valid |= CCB_TRANS_TQ_VALID;
6463 		}
6464 
6465 		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6466 		 || (inq_data->flags & SID_CmdQue) == 0
6467 		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6468 		 || (device->quirk->mintags == 0)) {
6469 			/*
6470 			 * Can't tag on hardware that doesn't support tags,
6471 			 * doesn't have it enabled, or has broken tag support.
6472 			 */
6473 			cts->flags &= ~CCB_TRANS_TAG_ENB;
6474 		}
6475 	}
6476 
6477 	qfrozen = FALSE;
6478 	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
6479 		int device_tagenb;
6480 
6481 		/*
6482 		 * If we are transitioning from tags to no-tags or
6483 		 * vice-versa, we need to carefully freeze and restart
6484 		 * the queue so that we don't overlap tagged and non-tagged
6485 		 * commands.  We also temporarily stop tags if there is
6486 		 * a change in transfer negotiation settings to allow
6487 		 * "tag-less" negotiation.
6488 		 */
6489 		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6490 		 || (device->inq_flags & SID_CmdQue) != 0)
6491 			device_tagenb = TRUE;
6492 		else
6493 			device_tagenb = FALSE;
6494 
6495 		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
6496 		   && device_tagenb == FALSE)
6497 		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
6498 		   && device_tagenb == TRUE)) {
6499 
6500 			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
6501 				/*
6502 				 * Delay change to use tags until after a
6503 				 * few commands have gone to this device so
6504 				 * the controller has time to perform transfer
6505 				 * negotiations without tagged messages getting
6506 				 * in the way.
6507 				 */
6508 				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6509 				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6510 			} else {
6511 				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6512 				qfrozen = TRUE;
6513 				device->inq_flags &= ~SID_CmdQue;
6514 				xpt_dev_ccbq_resize(cts->ccb_h.path,
6515 						    sim->max_dev_openings);
6516 				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6517 				device->tag_delay_count = 0;
6518 			}
6519 		}
6520 	}
6521 
6522 	if (async_update == FALSE) {
6523 		/*
6524 		 * If we are currently performing tagged transactions to
6525 		 * this device and want to change its negotiation parameters,
6526 		 * go non-tagged for a bit to give the controller a chance to
6527 		 * negotiate unhampered by tag messages.
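		 *
		 * (xpt_toggle_tags() below briefly disables and then
		 * re-enables tagged queuing, which arms the
		 * tag_delay_count machinery described above.)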
6528 		 */
6529 		if ((device->inq_flags & SID_CmdQue) != 0
6530 		 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
6531 				   CCB_TRANS_SYNC_OFFSET_VALID|
6532 				   CCB_TRANS_BUS_WIDTH_VALID)) != 0)
6533 			xpt_toggle_tags(cts->ccb_h.path);
6534 
6535 		(*(sim->sim_action))(sim, (union ccb *)cts);
6536 	}
6537 
6538 	if (qfrozen) {
6539 		struct ccb_relsim crs;
6540 
6541 		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6542 			      /*priority*/1);
6543 		crs.ccb_h.func_code = XPT_REL_SIMQ;
6544 		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6545 		crs.openings
6546 		    = crs.release_timeout
6547 		    = crs.qfrozen_cnt
6548 		    = 0;
6549 		xpt_action((union ccb *)&crs);
6550 	}
6551 }
6552 
6553 
6554 #endif /* CAM_NEW_TRAN_CODE */
6555 
6556 static void
6557 xpt_toggle_tags(struct cam_path *path)
6558 {
6559 	struct cam_ed *dev;
6560 
6561 	/*
6562 	 * Give controllers a chance to renegotiate
6563 	 * before starting tag operations.  We
6564 	 * "toggle" tagged queuing off then on
6565 	 * which causes the tag enable command delay
6566 	 * counter to come into effect.
6567 	 */
6568 	dev = path->device;
6569 	if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6570 	 || ((dev->inq_flags & SID_CmdQue) != 0
6571 	  && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6572 		struct ccb_trans_settings cts;
6573 
6574 		xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6575 #ifdef CAM_NEW_TRAN_CODE
6576 		cts.protocol = PROTO_SCSI;
6577 		cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6578 		cts.transport = XPORT_UNSPECIFIED;
6579 		cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6580 		cts.proto_specific.scsi.flags = 0;
6581 		cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6582 #else /* CAM_NEW_TRAN_CODE */
6583 		cts.flags = 0;
6584 		cts.valid = CCB_TRANS_TQ_VALID;
6585 #endif /* CAM_NEW_TRAN_CODE */
6586 		xpt_set_transfer_settings(&cts, path->device,
6587 					  /*async_update*/TRUE);
6588 #ifdef CAM_NEW_TRAN_CODE
6589 		cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6590 #else /* CAM_NEW_TRAN_CODE */
6591 		cts.flags = CCB_TRANS_TAG_ENB;
6592 #endif /* CAM_NEW_TRAN_CODE */
6593 		xpt_set_transfer_settings(&cts, path->device,
6594 					  /*async_update*/TRUE);
6595 	}
6596 }
6597 
6598 static void
6599 xpt_start_tags(struct cam_path *path)
6600 {
6601 	struct ccb_relsim crs;
6602 	struct cam_ed *device;
6603 	struct cam_sim *sim;
6604 	int newopenings;
6605 
6606 	device = path->device;
6607 	sim = path->bus->sim;
6608 	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6609 	xpt_freeze_devq(path, /*count*/1);
6610 	device->inq_flags |= SID_CmdQue;
6611 	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
6612 	xpt_dev_ccbq_resize(path, newopenings);
6613 	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6614 	crs.ccb_h.func_code = XPT_REL_SIMQ;
6615 	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6616 	crs.openings
6617 	    = crs.release_timeout
6618 	    = crs.qfrozen_cnt
6619 	    = 0;
6620 	xpt_action((union ccb *)&crs);
6621 }
6622 
6623 static int busses_to_config;
6624 static int busses_to_reset;
6625 
6626 static int
6627 xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6628 {
6629 	if (bus->path_id != CAM_XPT_PATH_ID) {
6630 		struct cam_path path;
6631 		struct ccb_pathinq cpi;
6632 		int can_negotiate;
6633 
6634 		busses_to_config++;
6635 		xpt_compile_path(&path, NULL, bus->path_id,
6636 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6637 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6638 		cpi.ccb_h.func_code = XPT_PATH_INQ;
6639 		xpt_action((union ccb *)&cpi);
6640 		can_negotiate = cpi.hba_inquiry;
6641 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6642 		if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6643 		 && can_negotiate)
6644 			busses_to_reset++;
6645 		xpt_release_path(&path);
6646 	}
6647 
6648 	return(1);
6649 }
6650 
6651 static int
6652 xptconfigfunc(struct cam_eb *bus, void *arg)
6653 {
6654 	struct cam_path *path;
6655 	union ccb *work_ccb;
6656 
6657 	if (bus->path_id != CAM_XPT_PATH_ID) {
6658 		cam_status status;
6659 		int can_negotiate;
6660 
6661 		work_ccb = xpt_alloc_ccb();
6662 		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6663 					      CAM_TARGET_WILDCARD,
6664 					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
6665 			printf("xptconfigfunc: xpt_create_path failed with "
6666 			       "status %#x for bus %d\n", status, bus->path_id);
6667 			printf("xptconfigfunc: halting bus configuration\n");
6668 			xpt_free_ccb(work_ccb);
6669 			busses_to_config--;
6670 			xpt_finishconfig(xpt_periph, NULL);
6671 			return(0);
6672 		}
6673 		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6674 		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6675 		xpt_action(work_ccb);
6676 		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6677 			printf("xptconfigfunc: CPI failed on bus %d "
6678 			       "with status %d\n", bus->path_id,
6679 			       work_ccb->ccb_h.status);
6680 			xpt_finishconfig(xpt_periph, work_ccb);
6681 			return(1);
6682 		}
6683 
6684 		can_negotiate = work_ccb->cpi.hba_inquiry;
6685 		can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6686 		if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6687 		 && (can_negotiate != 0)) {
6688 			xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6689 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6690 			work_ccb->ccb_h.cbfcnp = NULL;
6691 			CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6692 				  ("Resetting Bus\n"));
6693 			xpt_action(work_ccb);
6694 			xpt_finishconfig(xpt_periph, work_ccb);
6695 		} else {
6696 			/* Act as though we performed a successful BUS RESET */
6697 			work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6698 			xpt_finishconfig(xpt_periph, work_ccb);
6699 		}
6700 	}
6701 
6702 	return(1);
6703 }
6704 
6705 static void
6706 xpt_config(void *arg)
6707 {
6708 	/*
6709 	 * Now that interrupts are enabled, go find our devices
6710 	 */
6711 
6712 #ifdef CAMDEBUG
6713 	/* Setup debugging flags and path */
6714 #ifdef CAM_DEBUG_FLAGS
6715 	cam_dflags = CAM_DEBUG_FLAGS;
6716 #else /* !CAM_DEBUG_FLAGS */
6717 	cam_dflags = CAM_DEBUG_NONE;
6718 #endif /* CAM_DEBUG_FLAGS */
6719 #ifdef CAM_DEBUG_BUS
6720 	if (cam_dflags != CAM_DEBUG_NONE) {
6721 		if (xpt_create_path(&cam_dpath, xpt_periph,
6722 				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6723 				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6724 			printf("xpt_config: xpt_create_path() failed for debug"
6725 			       " target %d:%d:%d, debugging disabled\n",
6726 			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6727 			cam_dflags = CAM_DEBUG_NONE;
6728 		}
6729 	} else
6730 		cam_dpath = NULL;
6731 #else /* !CAM_DEBUG_BUS */
6732 	cam_dpath = NULL;
6733 #endif /* CAM_DEBUG_BUS */
6734 #endif /* CAMDEBUG */
6735 
6736 	/*
6737 	 * Scan all installed busses.
6738 	 */
6739 	xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6740 
6741 	if (busses_to_config == 0) {
6742 		/* Call manually because we don't have any busses */
6743 		xpt_finishconfig(xpt_periph, NULL);
6744 	} else {
6745 		if (busses_to_reset > 0 && SCSI_DELAY >= 2000) {
6746 			printf("Waiting %d seconds for SCSI "
6747 			       "devices to settle\n", SCSI_DELAY/1000);
6748 		}
6749 		xpt_for_all_busses(xptconfigfunc, NULL);
6750 	}
6751 }
6752 
6753 /*
6754  * If the given device only has one peripheral attached to it, and if that
6755  * peripheral is the passthrough driver, announce it.  This ensures that the
6756  * user sees some sort of announcement for every peripheral in their system.
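 *
 * (Devices claimed by a "standard" peripheral driver are announced by
 * that driver as it attaches; only pass(4)-only devices would otherwise
 * come up silently.)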
6757  */
6758 static int
6759 xptpassannouncefunc(struct cam_ed *device, void *arg)
6760 {
6761 	struct cam_periph *periph;
6762 	int i;
6763 
6764 	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
6765 	     periph = SLIST_NEXT(periph, periph_links), i++);
6766 
6767 	periph = SLIST_FIRST(&device->periphs);
6768 	if ((i == 1)
6769 	 && (strncmp(periph->periph_name, "pass", 4) == 0))
6770 		xpt_announce_periph(periph, NULL);
6771 
6772 	return(1);
6773 }
6774 
6775 static void
6776 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
6777 {
6778 	struct periph_driver **p_drv;
6779 	int i;
6780 
6781 	if (done_ccb != NULL) {
6782 		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
6783 			  ("xpt_finishconfig\n"));
6784 		switch(done_ccb->ccb_h.func_code) {
6785 		case XPT_RESET_BUS:
6786 			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
6787 				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
6788 				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
6789 				xpt_action(done_ccb);
6790 				return;
6791 			}
6792 			/* FALLTHROUGH */
6793 		case XPT_SCAN_BUS:
6794 		default:
6795 			xpt_free_path(done_ccb->ccb_h.path);
6796 			busses_to_config--;
6797 			break;
6798 		}
6799 	}
6800 
6801 	if (busses_to_config == 0) {
6802 		/* Register all the peripheral drivers */
6803 		/* XXX This will have to change when we have loadable modules */
6804 		p_drv = periph_drivers;
6805 		for (i = 0; p_drv[i] != NULL; i++) {
6806 			(*p_drv[i]->init)();
6807 		}
6808 
6809 		/*
6810 		 * Check for devices with no "standard" peripheral driver
6811 		 * attached.  For any devices like that, announce the
6812 		 * passthrough driver so the user will see something.
6813 		 */
6814 		xpt_for_all_devices(xptpassannouncefunc, NULL);
6815 
6816 		/* Release our hook so that the boot can continue. */
6817 		config_intrhook_disestablish(xpt_config_hook);
6818 		free(xpt_config_hook, M_TEMP);
6819 		xpt_config_hook = NULL;
6820 	}
6821 	if (done_ccb != NULL)
6822 		xpt_free_ccb(done_ccb);
6823 }
6824 
6825 static void
6826 xptaction(struct cam_sim *sim, union ccb *work_ccb)
6827 {
6828 	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
6829 
6830 	switch (work_ccb->ccb_h.func_code) {
6831 	/* Common cases first */
6832 	case XPT_PATH_INQ:		/* Path routing inquiry */
6833 	{
6834 		struct ccb_pathinq *cpi;
6835 
6836 		cpi = &work_ccb->cpi;
6837 		cpi->version_num = 1; /* XXX??? */
6838 		cpi->hba_inquiry = 0;
6839 		cpi->target_sprt = 0;
6840 		cpi->hba_misc = 0;
6841 		cpi->hba_eng_cnt = 0;
6842 		cpi->max_target = 0;
6843 		cpi->max_lun = 0;
6844 		cpi->initiator_id = 0;
6845 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
6846 		strncpy(cpi->hba_vid, "", HBA_IDLEN);
6847 		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
6848 		cpi->unit_number = sim->unit_number;
6849 		cpi->bus_id = sim->bus_id;
6850 		cpi->base_transfer_speed = 0;
6851 #ifdef CAM_NEW_TRAN_CODE
6852 		cpi->protocol = PROTO_UNSPECIFIED;
6853 		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
6854 		cpi->transport = XPORT_UNSPECIFIED;
6855 		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
6856 #endif /* CAM_NEW_TRAN_CODE */
6857 		cpi->ccb_h.status = CAM_REQ_CMP;
6858 		xpt_done(work_ccb);
6859 		break;
6860 	}
6861 	default:
6862 		work_ccb->ccb_h.status = CAM_REQ_INVALID;
6863 		xpt_done(work_ccb);
6864 		break;
6865 	}
6866 }
6867 
6868 /*
6869  * The xpt as a "controller" has no interrupt sources, so polling
6870  * is a no-op.
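 *
 * (The xpt's sim exists mainly so that the transport layer itself can
 * be addressed through a path like any other bus; xptaction() above
 * answers XPT_PATH_INQ and rejects everything else.)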
6871  */
6872 static void
6873 xptpoll(struct cam_sim *sim)
6874 {
6875 }
6876 
6877 static void
6878 camisr(void *V_queue)
6879 {
6880 	cam_isrq_t *queue = V_queue;
6881 	int s;
6882 	struct ccb_hdr *ccb_h;
6883 
6884 	s = splcam();
6885 	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
6886 		int runq;
6887 
6888 		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
6889 		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
6890 		splx(s);
6891 
6892 		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
6893 			  ("camisr"));
6894 
6895 		runq = FALSE;
6896 
6897 		if (ccb_h->flags & CAM_HIGH_POWER) {
6898 			struct highpowerlist *hphead;
6899 			struct cam_ed *device;
6900 			union ccb *send_ccb;
6901 
6902 			hphead = &highpowerq;
6903 
6904 			send_ccb = (union ccb *)STAILQ_FIRST(hphead);
6905 
6906 			/*
6907 			 * Increment the count since this command is done.
6908 			 */
6909 			num_highpower++;
6910 
6911 			/*
6912 			 * Any high powered commands queued up?
6913 			 */
6914 			if (send_ccb != NULL) {
6915 				device = send_ccb->ccb_h.path->device;
6916 
6917 				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
6918 
6919 				xpt_release_devq(send_ccb->ccb_h.path,
6920 						 /*count*/1, /*run_queue*/TRUE);
6921 			}
6922 		}
6923 		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
6924 			struct cam_ed *dev;
6925 
6926 			dev = ccb_h->path->device;
6927 
6928 			s = splcam();
6929 			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
6930 
6931 			ccb_h->path->bus->sim->devq->send_active--;
6932 			ccb_h->path->bus->sim->devq->send_openings++;
6933 			splx(s);
6934 
6935 			if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
6936 			  && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
6937 			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
6938 			  && (dev->ccbq.dev_active == 0))) {
6939 
6940 				xpt_release_devq(ccb_h->path, /*count*/1,
6941 						 /*run_queue*/TRUE);
6942 			}
6943 
6944 			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6945 			 && (--dev->tag_delay_count == 0))
6946 				xpt_start_tags(ccb_h->path);
6947 
6948 			if ((dev->ccbq.queue.entries > 0)
6949 			 && (dev->qfrozen_cnt == 0)
6950 			 && (device_is_send_queued(dev) == 0)) {
6951 				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
6952 							      dev);
6953 			}
6954 		}
6955 
6956 		if (ccb_h->status & CAM_RELEASE_SIMQ) {
6957 			xpt_release_simq(ccb_h->path->bus->sim,
6958 					 /*run_queue*/TRUE);
6959 			ccb_h->status &= ~CAM_RELEASE_SIMQ;
6960 			runq = FALSE;
6961 		}
6962 
6963 		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
6964 		 && (ccb_h->status & CAM_DEV_QFRZN)) {
6965 			xpt_release_devq(ccb_h->path, /*count*/1,
6966 					 /*run_queue*/TRUE);
6967 			ccb_h->status &= ~CAM_DEV_QFRZN;
6968 		} else if (runq) {
6969 			xpt_run_dev_sendq(ccb_h->path->bus);
6970 		}
6971 
6972 		/* Call the peripheral driver's callback */
6973 		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
6974 
6975 		/* Raise IPL for the while test */
6976 		s = splcam();
6977 	}
6978 	splx(s);
6979 }
6980 
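/*
 * (camisr() finishes all of its queue bookkeeping before invoking the
 * peripheral's completion callback, so a driver may safely queue new
 * work from within that callback.)
 */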