/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>
#include <sys/bus.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
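 *
 * (Explanatory note: a device that has both a peripheral waiting for a
 * CCB and CCBs waiting for controller resources can sit on both run
 * queues at once, which is why struct cam_ed below carries two separate
 * cam_ed_qinfo entries.)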
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	 serial_num_len;
	u_int8_t	*serial_num;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
#define CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
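 *
 * As an illustrative sketch (not taken from this file's callers), a SIM
 * driver typically registers its bus at attach time with something like:
 *
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);	-- error path hypothetical
 *		return;
 *	}
 *
 * xpt_init below registers the transport's own pseudo-bus the same way.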
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
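		 * (CAM_QUIRK_NOLUNS keeps the probe code from scanning
		 * past LUN 0 on devices matching the entry.)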
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
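 *
 * For example, a kernel configuration that traces CDBs on one target
 * might contain (values illustrative):
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=0
 *	options	CAM_DEBUG_TARGET=1
 *	options	CAM_DEBUG_LUN=0
 *	options	CAM_DEBUG_FLAGS=CAM_DEBUG_CDB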
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static void	xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
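			/*
			 * The ccb queue now matches the openings the
			 * device actually exposes, so the deferred
			 * resize request has been satisfied; clear it.
			 */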
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
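			 * (cam_periph_mapmem wires the user-space pattern
			 * and result buffers and remaps them into the
			 * kernel, so xpt_action below can dereference
			 * them directly.)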
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it with splcam protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, /*bus #*/0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
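	 * Bus probing cannot proceed until interrupts are being
	 * delivered, so xpt_config is deferred through an
	 * intr_config_hook, which the kernel invokes once interrupts
	 * have been enabled.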
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMNET, swi_camnet);
	register_swi(SWI_CAMBIO, swi_cambio);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, (speed % 1000) * 1000);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
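	 * (This is how a complete device list is produced: a caller that
	 * issues XPT_DEV_MATCH with no patterns gets every bus copied out,
	 * and the descend action walks the targets, devices, and
	 * peripherals beneath it.)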
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;


			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
1953 */ 1954 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1955 && (cdm->pos.cookie.bus == target->bus) 1956 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1957 && (cdm->pos.cookie.target == target) 1958 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1959 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 1960 && (cdm->pos.generations[CAM_DEV_GENERATION] != 1961 target->generation)) { 1962 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1963 return(0); 1964 } 1965 1966 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1967 && (cdm->pos.cookie.bus == target->bus) 1968 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1969 && (cdm->pos.cookie.target == target) 1970 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1971 && (cdm->pos.cookie.device != NULL)) 1972 return(xptdevicetraverse(target, 1973 (struct cam_ed *)cdm->pos.cookie.device, 1974 xptedtdevicefunc, arg)); 1975 else 1976 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 1977 } 1978 1979 static int 1980 xptedtdevicefunc(struct cam_ed *device, void *arg) 1981 { 1982 1983 struct ccb_dev_match *cdm; 1984 dev_match_ret retval; 1985 1986 cdm = (struct ccb_dev_match *)arg; 1987 1988 /* 1989 * If our position is for something deeper in the tree, that means 1990 * that we've already seen this node. So, we keep going down. 1991 */ 1992 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1993 && (cdm->pos.cookie.device == device) 1994 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1995 && (cdm->pos.cookie.periph != NULL)) 1996 retval = DM_RET_DESCEND; 1997 else 1998 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1999 device); 2000 2001 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2002 cdm->status = CAM_DEV_MATCH_ERROR; 2003 return(0); 2004 } 2005 2006 /* 2007 * If the copy flag is set, copy this device out. 2008 */ 2009 if (retval & DM_RET_COPY) { 2010 int spaceleft, j; 2011 2012 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2013 sizeof(struct dev_match_result)); 2014 2015 /* 2016 * If we don't have enough space to put in another 2017 * match result, save our position and tell the 2018 * user there are more devices to check. 
2019 */ 2020 if (spaceleft < sizeof(struct dev_match_result)) { 2021 bzero(&cdm->pos, sizeof(cdm->pos)); 2022 cdm->pos.position_type = 2023 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2024 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2025 2026 cdm->pos.cookie.bus = device->target->bus; 2027 cdm->pos.generations[CAM_BUS_GENERATION]= 2028 bus_generation; 2029 cdm->pos.cookie.target = device->target; 2030 cdm->pos.generations[CAM_TARGET_GENERATION] = 2031 device->target->bus->generation; 2032 cdm->pos.cookie.device = device; 2033 cdm->pos.generations[CAM_DEV_GENERATION] = 2034 device->target->generation; 2035 cdm->status = CAM_DEV_MATCH_MORE; 2036 return(0); 2037 } 2038 j = cdm->num_matches; 2039 cdm->num_matches++; 2040 cdm->matches[j].type = DEV_MATCH_DEVICE; 2041 cdm->matches[j].result.device_result.path_id = 2042 device->target->bus->path_id; 2043 cdm->matches[j].result.device_result.target_id = 2044 device->target->target_id; 2045 cdm->matches[j].result.device_result.target_lun = 2046 device->lun_id; 2047 bcopy(&device->inq_data, 2048 &cdm->matches[j].result.device_result.inq_data, 2049 sizeof(struct scsi_inquiry_data)); 2050 2051 /* Let the user know whether this device is unconfigured */ 2052 if (device->flags & CAM_DEV_UNCONFIGURED) 2053 cdm->matches[j].result.device_result.flags = 2054 DEV_RESULT_UNCONFIGURED; 2055 else 2056 cdm->matches[j].result.device_result.flags = 2057 DEV_RESULT_NOFLAG; 2058 } 2059 2060 /* 2061 * If the user isn't interested in peripherals, don't descend 2062 * the tree any further. 2063 */ 2064 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2065 return(1); 2066 2067 /* 2068 * If there is a peripheral list generation recorded, make sure 2069 * it hasn't changed. 2070 */ 2071 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2072 && (device->target->bus == cdm->pos.cookie.bus) 2073 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2074 && (device->target == cdm->pos.cookie.target) 2075 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2076 && (device == cdm->pos.cookie.device) 2077 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2078 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2079 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2080 device->generation)){ 2081 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2082 return(0); 2083 } 2084 2085 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2086 && (cdm->pos.cookie.bus == device->target->bus) 2087 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2088 && (cdm->pos.cookie.target == device->target) 2089 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2090 && (cdm->pos.cookie.device == device) 2091 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2092 && (cdm->pos.cookie.periph != NULL)) 2093 return(xptperiphtraverse(device, 2094 (struct cam_periph *)cdm->pos.cookie.periph, 2095 xptedtperiphfunc, arg)); 2096 else 2097 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2098 } 2099 2100 static int 2101 xptedtperiphfunc(struct cam_periph *periph, void *arg) 2102 { 2103 struct ccb_dev_match *cdm; 2104 dev_match_ret retval; 2105 2106 cdm = (struct ccb_dev_match *)arg; 2107 2108 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2109 2110 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2111 cdm->status = CAM_DEV_MATCH_ERROR; 2112 return(0); 2113 } 2114 2115 /* 2116 * If the copy flag is set, copy this peripheral out. 
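		 *
		 * A position saved at this depth must record all four
		 * levels of the tree (bus, target, device and
		 * peripheral) along with their generations, since a
		 * resumed search re-walks the EDT from the top.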
2117 */ 2118 if (retval & DM_RET_COPY) { 2119 int spaceleft, j; 2120 2121 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2122 sizeof(struct dev_match_result)); 2123 2124 /* 2125 * If we don't have enough space to put in another 2126 * match result, save our position and tell the 2127 * user there are more devices to check. 2128 */ 2129 if (spaceleft < sizeof(struct dev_match_result)) { 2130 bzero(&cdm->pos, sizeof(cdm->pos)); 2131 cdm->pos.position_type = 2132 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2133 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2134 CAM_DEV_POS_PERIPH; 2135 2136 cdm->pos.cookie.bus = periph->path->bus; 2137 cdm->pos.generations[CAM_BUS_GENERATION]= 2138 bus_generation; 2139 cdm->pos.cookie.target = periph->path->target; 2140 cdm->pos.generations[CAM_TARGET_GENERATION] = 2141 periph->path->bus->generation; 2142 cdm->pos.cookie.device = periph->path->device; 2143 cdm->pos.generations[CAM_DEV_GENERATION] = 2144 periph->path->target->generation; 2145 cdm->pos.cookie.periph = periph; 2146 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2147 periph->path->device->generation; 2148 cdm->status = CAM_DEV_MATCH_MORE; 2149 return(0); 2150 } 2151 2152 j = cdm->num_matches; 2153 cdm->num_matches++; 2154 cdm->matches[j].type = DEV_MATCH_PERIPH; 2155 cdm->matches[j].result.periph_result.path_id = 2156 periph->path->bus->path_id; 2157 cdm->matches[j].result.periph_result.target_id = 2158 periph->path->target->target_id; 2159 cdm->matches[j].result.periph_result.target_lun = 2160 periph->path->device->lun_id; 2161 cdm->matches[j].result.periph_result.unit_number = 2162 periph->unit_number; 2163 strncpy(cdm->matches[j].result.periph_result.periph_name, 2164 periph->periph_name, DEV_IDLEN); 2165 } 2166 2167 return(1); 2168 } 2169 2170 static int 2171 xptedtmatch(struct ccb_dev_match *cdm) 2172 { 2173 int ret; 2174 2175 cdm->num_matches = 0; 2176 2177 /* 2178 * Check the bus list generation. If it has changed, the user 2179 * needs to reset everything and start over. 2180 */ 2181 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2182 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2183 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2184 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2185 return(0); 2186 } 2187 2188 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2189 && (cdm->pos.cookie.bus != NULL)) 2190 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2191 xptedtbusfunc, cdm); 2192 else 2193 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2194 2195 /* 2196 * If we get back 0, that means that we had to stop before fully 2197 * traversing the EDT. It also means that one of the subroutines 2198 * has set the status field to the proper value. If we get back 1, 2199 * we've fully traversed the EDT and copied out any matching entries. 
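	 *
	 * A minimal sketch of the userland half of this protocol (not
	 * part of this file; it assumes a CAM transport device such as
	 * /dev/xpt0 and the CAMIOCOMMAND ioctl, and omits open/setup
	 * and error details).  Zeroing the ccb also zeroes cdm.pos:
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
	 *	ccb.cdm.match_buf_len = bufsize;
	 *	ccb.cdm.matches = matchbuf;
	 *	do {
	 *		if (ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
	 *			err(1, "CAMIOCOMMAND");
	 *		... consume ccb.cdm.num_matches results ...
	 *	} while (ccb.cdm.status == CAM_DEV_MATCH_MORE);
	 *
	 * cdm.pos is left untouched between iterations, which is what
	 * lets the traversal here resume where it stopped.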
2200 	 */
2201 	if (ret == 1)
2202 		cdm->status = CAM_DEV_MATCH_LAST;
2203 
2204 	return(ret);
2205 }
2206 
2207 static int
2208 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2209 {
2210 	struct ccb_dev_match *cdm;
2211 
2212 	cdm = (struct ccb_dev_match *)arg;
2213 
2214 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2215 	 && (cdm->pos.cookie.pdrv == pdrv)
2216 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2217 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2218 	 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2219 	     (*pdrv)->generation)) {
2220 		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2221 		return(0);
2222 	}
2223 
2224 	if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2225 	 && (cdm->pos.cookie.pdrv == pdrv)
2226 	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2227 	 && (cdm->pos.cookie.periph != NULL))
2228 		return(xptpdperiphtraverse(pdrv,
2229 				(struct cam_periph *)cdm->pos.cookie.periph,
2230 				xptplistperiphfunc, arg));
2231 	else
2232 		return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
2233 }
2234 
2235 static int
2236 xptplistperiphfunc(struct cam_periph *periph, void *arg)
2237 {
2238 	struct ccb_dev_match *cdm;
2239 	dev_match_ret retval;
2240 
2241 	cdm = (struct ccb_dev_match *)arg;
2242 
2243 	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2244 
2245 	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2246 		cdm->status = CAM_DEV_MATCH_ERROR;
2247 		return(0);
2248 	}
2249 
2250 	/*
2251 	 * If the copy flag is set, copy this peripheral out.
2252 	 */
2253 	if (retval & DM_RET_COPY) {
2254 		int spaceleft, j;
2255 
2256 		spaceleft = cdm->match_buf_len - (cdm->num_matches *
2257 			sizeof(struct dev_match_result));
2258 
2259 		/*
2260 		 * If we don't have enough space to put in another
2261 		 * match result, save our position and tell the
2262 		 * user there are more devices to check.
2263 		 */
2264 		if (spaceleft < sizeof(struct dev_match_result)) {
2265 			struct periph_driver **pdrv;
2266 
2267 			pdrv = NULL;
2268 			bzero(&cdm->pos, sizeof(cdm->pos));
2269 			cdm->pos.position_type =
2270 				CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2271 				CAM_DEV_POS_PERIPH;
2272 
2273 			/*
2274 			 * This may look a bit nonsensical, but it is
2275 			 * actually quite logical.  There are very few
2276 			 * peripheral drivers, and bloating every peripheral
2277 			 * structure with a pointer back to its parent
2278 			 * peripheral driver linker set entry would cost
2279 			 * more in the long run than doing this quick lookup.
2280 			 */
2281 			for (pdrv =
2282 			     (struct periph_driver **)periphdriver_set.ls_items;
2283 			     *pdrv != NULL; pdrv++) {
2284 				if (strcmp((*pdrv)->driver_name,
2285 				    periph->periph_name) == 0)
2286 					break;
2287 			}
2288 
2289 			if (*pdrv == NULL) {	/* name not in the linker set */
2290 				cdm->status = CAM_DEV_MATCH_ERROR;
2291 				return(0);
2292 			}
2293 
2294 			cdm->pos.cookie.pdrv = pdrv;
2295 			/*
2296 			 * The periph generation slot does double duty, as
2297 			 * does the periph pointer slot.  They are used for
2298 			 * both edt and pdrv lookups and positioning.
2299 			 */
2300 			cdm->pos.cookie.periph = periph;
2301 			cdm->pos.generations[CAM_PERIPH_GENERATION] =
2302 				(*pdrv)->generation;
2303 			cdm->status = CAM_DEV_MATCH_MORE;
2304 			return(0);
2305 		}
2306 
2307 		j = cdm->num_matches;
2308 		cdm->num_matches++;
2309 		cdm->matches[j].type = DEV_MATCH_PERIPH;
2310 		cdm->matches[j].result.periph_result.path_id =
2311 			periph->path->bus->path_id;
2312 
2313 		/*
2314 		 * The transport layer peripheral doesn't have a target or
2315 		 * lun.
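		 * (The xpt peripheral hangs off the wildcard path.)
		 * Report -1 for both rather than dereferencing a NULL
		 * pointer.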
2316 */ 2317 if (periph->path->target) 2318 cdm->matches[j].result.periph_result.target_id = 2319 periph->path->target->target_id; 2320 else 2321 cdm->matches[j].result.periph_result.target_id = -1; 2322 2323 if (periph->path->device) 2324 cdm->matches[j].result.periph_result.target_lun = 2325 periph->path->device->lun_id; 2326 else 2327 cdm->matches[j].result.periph_result.target_lun = -1; 2328 2329 cdm->matches[j].result.periph_result.unit_number = 2330 periph->unit_number; 2331 strncpy(cdm->matches[j].result.periph_result.periph_name, 2332 periph->periph_name, DEV_IDLEN); 2333 } 2334 2335 return(1); 2336 } 2337 2338 static int 2339 xptperiphlistmatch(struct ccb_dev_match *cdm) 2340 { 2341 int ret; 2342 2343 cdm->num_matches = 0; 2344 2345 /* 2346 * At this point in the edt traversal function, we check the bus 2347 * list generation to make sure that no busses have been added or 2348 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2349 * For the peripheral driver list traversal function, however, we 2350 * don't have to worry about new peripheral driver types coming or 2351 * going; they're in a linker set, and therefore can't change 2352 * without a recompile. 2353 */ 2354 2355 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2356 && (cdm->pos.cookie.pdrv != NULL)) 2357 ret = xptpdrvtraverse( 2358 (struct periph_driver **)cdm->pos.cookie.pdrv, 2359 xptplistpdrvfunc, cdm); 2360 else 2361 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2362 2363 /* 2364 * If we get back 0, that means that we had to stop before fully 2365 * traversing the peripheral driver tree. It also means that one of 2366 * the subroutines has set the status field to the proper value. If 2367 * we get back 1, we've fully traversed the EDT and copied out any 2368 * matching entries. 2369 */ 2370 if (ret == 1) 2371 cdm->status = CAM_DEV_MATCH_LAST; 2372 2373 return(ret); 2374 } 2375 2376 static int 2377 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2378 { 2379 struct cam_eb *bus, *next_bus; 2380 int retval; 2381 2382 retval = 1; 2383 2384 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2385 bus != NULL; 2386 bus = next_bus) { 2387 next_bus = TAILQ_NEXT(bus, links); 2388 2389 retval = tr_func(bus, arg); 2390 if (retval == 0) 2391 return(retval); 2392 } 2393 2394 return(retval); 2395 } 2396 2397 static int 2398 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2399 xpt_targetfunc_t *tr_func, void *arg) 2400 { 2401 struct cam_et *target, *next_target; 2402 int retval; 2403 2404 retval = 1; 2405 for (target = (start_target ? start_target : 2406 TAILQ_FIRST(&bus->et_entries)); 2407 target != NULL; target = next_target) { 2408 2409 next_target = TAILQ_NEXT(target, links); 2410 2411 retval = tr_func(target, arg); 2412 2413 if (retval == 0) 2414 return(retval); 2415 } 2416 2417 return(retval); 2418 } 2419 2420 static int 2421 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2422 xpt_devicefunc_t *tr_func, void *arg) 2423 { 2424 struct cam_ed *device, *next_device; 2425 int retval; 2426 2427 retval = 1; 2428 for (device = (start_device ? 
start_device : 2429 TAILQ_FIRST(&target->ed_entries)); 2430 device != NULL; 2431 device = next_device) { 2432 2433 next_device = TAILQ_NEXT(device, links); 2434 2435 retval = tr_func(device, arg); 2436 2437 if (retval == 0) 2438 return(retval); 2439 } 2440 2441 return(retval); 2442 } 2443 2444 static int 2445 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2446 xpt_periphfunc_t *tr_func, void *arg) 2447 { 2448 struct cam_periph *periph, *next_periph; 2449 int retval; 2450 2451 retval = 1; 2452 2453 for (periph = (start_periph ? start_periph : 2454 SLIST_FIRST(&device->periphs)); 2455 periph != NULL; 2456 periph = next_periph) { 2457 2458 next_periph = SLIST_NEXT(periph, periph_links); 2459 2460 retval = tr_func(periph, arg); 2461 if (retval == 0) 2462 return(retval); 2463 } 2464 2465 return(retval); 2466 } 2467 2468 static int 2469 xptpdrvtraverse(struct periph_driver **start_pdrv, 2470 xpt_pdrvfunc_t *tr_func, void *arg) 2471 { 2472 struct periph_driver **pdrv; 2473 int retval; 2474 2475 retval = 1; 2476 2477 /* 2478 * We don't traverse the peripheral driver list like we do the 2479 * other lists, because it is a linker set, and therefore cannot be 2480 * changed during runtime. If the peripheral driver list is ever 2481 * re-done to be something other than a linker set (i.e. it can 2482 * change while the system is running), the list traversal should 2483 * be modified to work like the other traversal functions. 2484 */ 2485 for (pdrv = (start_pdrv ? start_pdrv : 2486 (struct periph_driver **)periphdriver_set.ls_items); 2487 *pdrv != NULL; pdrv++) { 2488 retval = tr_func(pdrv, arg); 2489 2490 if (retval == 0) 2491 return(retval); 2492 } 2493 2494 return(retval); 2495 } 2496 2497 static int 2498 xptpdperiphtraverse(struct periph_driver **pdrv, 2499 struct cam_periph *start_periph, 2500 xpt_periphfunc_t *tr_func, void *arg) 2501 { 2502 struct cam_periph *periph, *next_periph; 2503 int retval; 2504 2505 retval = 1; 2506 2507 for (periph = (start_periph ? 
start_periph : 2508 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2509 periph = next_periph) { 2510 2511 next_periph = TAILQ_NEXT(periph, unit_links); 2512 2513 retval = tr_func(periph, arg); 2514 if (retval == 0) 2515 return(retval); 2516 } 2517 return(retval); 2518 } 2519 2520 static int 2521 xptdefbusfunc(struct cam_eb *bus, void *arg) 2522 { 2523 struct xpt_traverse_config *tr_config; 2524 2525 tr_config = (struct xpt_traverse_config *)arg; 2526 2527 if (tr_config->depth == XPT_DEPTH_BUS) { 2528 xpt_busfunc_t *tr_func; 2529 2530 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2531 2532 return(tr_func(bus, tr_config->tr_arg)); 2533 } else 2534 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2535 } 2536 2537 static int 2538 xptdeftargetfunc(struct cam_et *target, void *arg) 2539 { 2540 struct xpt_traverse_config *tr_config; 2541 2542 tr_config = (struct xpt_traverse_config *)arg; 2543 2544 if (tr_config->depth == XPT_DEPTH_TARGET) { 2545 xpt_targetfunc_t *tr_func; 2546 2547 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2548 2549 return(tr_func(target, tr_config->tr_arg)); 2550 } else 2551 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2552 } 2553 2554 static int 2555 xptdefdevicefunc(struct cam_ed *device, void *arg) 2556 { 2557 struct xpt_traverse_config *tr_config; 2558 2559 tr_config = (struct xpt_traverse_config *)arg; 2560 2561 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2562 xpt_devicefunc_t *tr_func; 2563 2564 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2565 2566 return(tr_func(device, tr_config->tr_arg)); 2567 } else 2568 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2569 } 2570 2571 static int 2572 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2573 { 2574 struct xpt_traverse_config *tr_config; 2575 xpt_periphfunc_t *tr_func; 2576 2577 tr_config = (struct xpt_traverse_config *)arg; 2578 2579 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2580 2581 /* 2582 * Unlike the other default functions, we don't check for depth 2583 * here. The peripheral driver level is the last level in the EDT, 2584 * so if we're here, we should execute the function in question. 2585 */ 2586 return(tr_func(periph, tr_config->tr_arg)); 2587 } 2588 2589 /* 2590 * Execute the given function for every bus in the EDT. 2591 */ 2592 static int 2593 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2594 { 2595 struct xpt_traverse_config tr_config; 2596 2597 tr_config.depth = XPT_DEPTH_BUS; 2598 tr_config.tr_func = tr_func; 2599 tr_config.tr_arg = arg; 2600 2601 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2602 } 2603 2604 #ifdef notusedyet 2605 /* 2606 * Execute the given function for every target in the EDT. 2607 */ 2608 static int 2609 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2610 { 2611 struct xpt_traverse_config tr_config; 2612 2613 tr_config.depth = XPT_DEPTH_TARGET; 2614 tr_config.tr_func = tr_func; 2615 tr_config.tr_arg = arg; 2616 2617 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2618 } 2619 #endif /* notusedyet */ 2620 2621 /* 2622 * Execute the given function for every device in the EDT. 
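 *
 * Like the other xpt_for_all_* wrappers, this packs the caller's
 * function and argument into an xpt_traverse_config stamped with the
 * desired depth and reuses xptbustraverse; the xptdef*func trampolines
 * then descend a level at a time until that depth is reached.  The
 * XPT_SASYNC_CB handler below uses it this way to replay
 * AC_FOUND_DEVICE events for devices that already exist:
 *
 *	xpt_for_all_devices(xptsetasyncfunc, cur_entry);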
2623 */ 2624 static int 2625 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2626 { 2627 struct xpt_traverse_config tr_config; 2628 2629 tr_config.depth = XPT_DEPTH_DEVICE; 2630 tr_config.tr_func = tr_func; 2631 tr_config.tr_arg = arg; 2632 2633 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2634 } 2635 2636 #ifdef notusedyet 2637 /* 2638 * Execute the given function for every peripheral in the EDT. 2639 */ 2640 static int 2641 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2642 { 2643 struct xpt_traverse_config tr_config; 2644 2645 tr_config.depth = XPT_DEPTH_PERIPH; 2646 tr_config.tr_func = tr_func; 2647 tr_config.tr_arg = arg; 2648 2649 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2650 } 2651 #endif /* notusedyet */ 2652 2653 static int 2654 xptsetasyncfunc(struct cam_ed *device, void *arg) 2655 { 2656 struct cam_path path; 2657 struct ccb_getdev cgd; 2658 struct async_node *cur_entry; 2659 2660 cur_entry = (struct async_node *)arg; 2661 2662 /* 2663 * Don't report unconfigured devices (Wildcard devs, 2664 * devices only for target mode, device instances 2665 * that have been invalidated but are waiting for 2666 * their last reference count to be released). 2667 */ 2668 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2669 return (1); 2670 2671 xpt_compile_path(&path, 2672 NULL, 2673 device->target->bus->path_id, 2674 device->target->target_id, 2675 device->lun_id); 2676 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2677 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2678 xpt_action((union ccb *)&cgd); 2679 cur_entry->callback(cur_entry->callback_arg, 2680 AC_FOUND_DEVICE, 2681 &path, &cgd); 2682 xpt_release_path(&path); 2683 2684 return(1); 2685 } 2686 2687 static int 2688 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2689 { 2690 struct cam_path path; 2691 struct ccb_pathinq cpi; 2692 struct async_node *cur_entry; 2693 2694 cur_entry = (struct async_node *)arg; 2695 2696 xpt_compile_path(&path, /*periph*/NULL, 2697 bus->sim->path_id, 2698 CAM_TARGET_WILDCARD, 2699 CAM_LUN_WILDCARD); 2700 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2701 cpi.ccb_h.func_code = XPT_PATH_INQ; 2702 xpt_action((union ccb *)&cpi); 2703 cur_entry->callback(cur_entry->callback_arg, 2704 AC_PATH_REGISTERED, 2705 &path, &cpi); 2706 xpt_release_path(&path); 2707 2708 return(1); 2709 } 2710 2711 void 2712 xpt_action(union ccb *start_ccb) 2713 { 2714 int iopl; 2715 2716 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2717 2718 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2719 2720 iopl = splsoftcam(); 2721 switch (start_ccb->ccb_h.func_code) { 2722 case XPT_SCSI_IO: 2723 { 2724 #ifdef CAMDEBUG 2725 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2726 struct cam_path *path; 2727 2728 path = start_ccb->ccb_h.path; 2729 #endif 2730 2731 /* 2732 * For the sake of compatibility with SCSI-1 2733 * devices that may not understand the identify 2734 * message, we include lun information in the 2735 * second byte of all commands. SCSI-1 specifies 2736 * that luns are a 3 bit value and reserves only 3 2737 * bits for lun information in the CDB. Later 2738 * revisions of the SCSI spec allow for more than 8 2739 * luns, but have deprecated lun information in the 2740 * CDB. So, if the lun won't fit, we must omit. 2741 * 2742 * Also be aware that during initial probing for devices, 2743 * the inquiry information is unknown but initialized to 0. 2744 * This means that this code will be exercised while probing 2745 * devices with an ANSI revision greater than 2. 
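		 *
		 * As a worked example: a request to lun 2 on such a
		 * device gets (2 << 5), i.e. 0x40, OR'd into the second
		 * CDB byte below.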
2746 		 */
2747 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2748 		 && start_ccb->ccb_h.target_lun < 8
2749 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2750 
2751 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2752 				start_ccb->ccb_h.target_lun << 5;
2753 		}
2754 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2755 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2756 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2757 				       &path->device->inq_data),
2758 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2759 					  cdb_str, sizeof(cdb_str))));
2760 		/* FALLTHROUGH */
2761 	}
2762 	case XPT_TARGET_IO:
2763 	case XPT_CONT_TARGET_IO:
2764 		start_ccb->csio.sense_resid = 0;
2765 		start_ccb->csio.resid = 0;
2766 		/* FALLTHROUGH */
2767 	case XPT_RESET_DEV:
2768 	case XPT_ENG_EXEC:
2769 	{
2770 		struct cam_path *path;
2771 		int s;
2772 		int runq;
2773 
2774 		path = start_ccb->ccb_h.path;
2775 		s = splsoftcam();
2776 
2777 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2778 		if (path->device->qfrozen_cnt == 0)
2779 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2780 		else
2781 			runq = 0;
2782 		splx(s);
2783 		if (runq != 0)
2784 			xpt_run_dev_sendq(path->bus);
2785 		break;
2786 	}
2787 	case XPT_SET_TRAN_SETTINGS:
2788 	{
2789 		xpt_set_transfer_settings(&start_ccb->cts,
2790 					  start_ccb->ccb_h.path->device,
2791 					  /*async_update*/FALSE);
2792 		break;
2793 	}
2794 	case XPT_CALC_GEOMETRY:
2795 	{
2796 		struct cam_sim *sim;
2797 
2798 		/* Filter out garbage */
2799 		if (start_ccb->ccg.block_size == 0
2800 		 || start_ccb->ccg.volume_size == 0) {
2801 			start_ccb->ccg.cylinders = 0;
2802 			start_ccb->ccg.heads = 0;
2803 			start_ccb->ccg.secs_per_track = 0;
2804 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2805 			break;
2806 		}
2807 #ifdef PC98
2808 		/*
2809 		 * In a PC-98 system, geometry translation depends on
2810 		 * the "real" device geometry obtained from mode page 4.
2811 		 * SCSI geometry translation is performed in the
2812 		 * initialization routine of the SCSI BIOS and the result
2813 		 * stored in host memory.  If the translation is available
2814 		 * in host memory, use it.  If not, rely on the default
2815 		 * translation the device driver performs.
2816 		 */
2817 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2818 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2819 			break;
2820 		}
2821 #endif
2822 		sim = start_ccb->ccb_h.path->bus->sim;
2823 		(*(sim->sim_action))(sim, start_ccb);
2824 		break;
2825 	}
2826 	case XPT_ABORT:
2827 	{
2828 		union ccb* abort_ccb;
2829 		int s;
2830 
2831 		abort_ccb = start_ccb->cab.abort_ccb;
2832 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2833 
2834 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2835 				struct cam_ccbq *ccbq;
2836 
2837 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2838 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2839 				abort_ccb->ccb_h.status =
2840 					CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2841 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2842 				s = splcam();
2843 				xpt_done(abort_ccb);
2844 				splx(s);
2845 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2846 				break;
2847 			}
2848 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2849 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2850 				/*
2851 				 * We've caught this ccb en route to
2852 				 * the SIM.  Flag it for abort and the
2853 				 * SIM will do so just before starting
2854 				 * real work on the CCB.
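				 *
				 * This is the second of three cases the
				 * XPT can settle itself: the ccb may
				 * still be on the device queue (dequeued
				 * and completed above), en route to the
				 * SIM (flagged here), or already on the
				 * done queue awaiting our SWI (reported
				 * as CAM_UA_ABORT below).  Anything else
				 * falls through to the SIM.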
2855 */ 2856 abort_ccb->ccb_h.status = 2857 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2858 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2859 start_ccb->ccb_h.status = CAM_REQ_CMP; 2860 break; 2861 } 2862 } 2863 if (XPT_FC_IS_QUEUED(abort_ccb) 2864 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2865 /* 2866 * It's already completed but waiting 2867 * for our SWI to get to it. 2868 */ 2869 start_ccb->ccb_h.status = CAM_UA_ABORT; 2870 break; 2871 } 2872 /* 2873 * If we weren't able to take care of the abort request 2874 * in the XPT, pass the request down to the SIM for processing. 2875 */ 2876 /* FALLTHROUGH */ 2877 } 2878 case XPT_ACCEPT_TARGET_IO: 2879 case XPT_EN_LUN: 2880 case XPT_IMMED_NOTIFY: 2881 case XPT_NOTIFY_ACK: 2882 case XPT_GET_TRAN_SETTINGS: 2883 case XPT_RESET_BUS: 2884 { 2885 struct cam_sim *sim; 2886 2887 sim = start_ccb->ccb_h.path->bus->sim; 2888 (*(sim->sim_action))(sim, start_ccb); 2889 break; 2890 } 2891 case XPT_PATH_INQ: 2892 { 2893 struct cam_sim *sim; 2894 2895 sim = start_ccb->ccb_h.path->bus->sim; 2896 (*(sim->sim_action))(sim, start_ccb); 2897 break; 2898 } 2899 case XPT_PATH_STATS: 2900 start_ccb->cpis.last_reset = 2901 start_ccb->ccb_h.path->bus->last_reset; 2902 start_ccb->ccb_h.status = CAM_REQ_CMP; 2903 break; 2904 case XPT_GDEV_TYPE: 2905 { 2906 struct cam_ed *dev; 2907 int s; 2908 2909 dev = start_ccb->ccb_h.path->device; 2910 s = splcam(); 2911 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2912 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2913 } else { 2914 struct ccb_getdev *cgd; 2915 struct cam_eb *bus; 2916 struct cam_et *tar; 2917 2918 cgd = &start_ccb->cgd; 2919 bus = cgd->ccb_h.path->bus; 2920 tar = cgd->ccb_h.path->target; 2921 cgd->inq_data = dev->inq_data; 2922 cgd->ccb_h.status = CAM_REQ_CMP; 2923 cgd->serial_num_len = dev->serial_num_len; 2924 if ((dev->serial_num_len > 0) 2925 && (dev->serial_num != NULL)) 2926 bcopy(dev->serial_num, cgd->serial_num, 2927 dev->serial_num_len); 2928 } 2929 splx(s); 2930 break; 2931 } 2932 case XPT_GDEV_STATS: 2933 { 2934 struct cam_ed *dev; 2935 int s; 2936 2937 dev = start_ccb->ccb_h.path->device; 2938 s = splcam(); 2939 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2940 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2941 } else { 2942 struct ccb_getdevstats *cgds; 2943 struct cam_eb *bus; 2944 struct cam_et *tar; 2945 2946 cgds = &start_ccb->cgds; 2947 bus = cgds->ccb_h.path->bus; 2948 tar = cgds->ccb_h.path->target; 2949 cgds->dev_openings = dev->ccbq.dev_openings; 2950 cgds->dev_active = dev->ccbq.dev_active; 2951 cgds->devq_openings = dev->ccbq.devq_openings; 2952 cgds->devq_queued = dev->ccbq.queue.entries; 2953 cgds->held = dev->ccbq.held; 2954 cgds->last_reset = tar->last_reset; 2955 cgds->maxtags = dev->quirk->maxtags; 2956 cgds->mintags = dev->quirk->mintags; 2957 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2958 cgds->last_reset = bus->last_reset; 2959 cgds->ccb_h.status = CAM_REQ_CMP; 2960 } 2961 splx(s); 2962 break; 2963 } 2964 case XPT_GDEVLIST: 2965 { 2966 struct cam_periph *nperiph; 2967 struct periph_list *periph_head; 2968 struct ccb_getdevlist *cgdl; 2969 int i; 2970 int s; 2971 struct cam_ed *device; 2972 int found; 2973 2974 2975 found = 0; 2976 2977 /* 2978 * Don't want anyone mucking with our data. 2979 */ 2980 s = splcam(); 2981 device = start_ccb->ccb_h.path->device; 2982 periph_head = &device->periphs; 2983 cgdl = &start_ccb->cgdl; 2984 2985 /* 2986 * Check and see if the list has changed since the user 2987 * last requested a list member. 
If so, tell them that the 2988 * list has changed, and therefore they need to start over 2989 * from the beginning. 2990 */ 2991 if ((cgdl->index != 0) && 2992 (cgdl->generation != device->generation)) { 2993 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2994 splx(s); 2995 break; 2996 } 2997 2998 /* 2999 * Traverse the list of peripherals and attempt to find 3000 * the requested peripheral. 3001 */ 3002 for (nperiph = periph_head->slh_first, i = 0; 3003 (nperiph != NULL) && (i <= cgdl->index); 3004 nperiph = nperiph->periph_links.sle_next, i++) { 3005 if (i == cgdl->index) { 3006 strncpy(cgdl->periph_name, 3007 nperiph->periph_name, 3008 DEV_IDLEN); 3009 cgdl->unit_number = nperiph->unit_number; 3010 found = 1; 3011 } 3012 } 3013 if (found == 0) { 3014 cgdl->status = CAM_GDEVLIST_ERROR; 3015 splx(s); 3016 break; 3017 } 3018 3019 if (nperiph == NULL) 3020 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3021 else 3022 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3023 3024 cgdl->index++; 3025 cgdl->generation = device->generation; 3026 3027 splx(s); 3028 cgdl->ccb_h.status = CAM_REQ_CMP; 3029 break; 3030 } 3031 case XPT_DEV_MATCH: 3032 { 3033 int s; 3034 dev_pos_type position_type; 3035 struct ccb_dev_match *cdm; 3036 int ret; 3037 3038 cdm = &start_ccb->cdm; 3039 3040 /* 3041 * Prevent EDT changes while we traverse it. 3042 */ 3043 s = splcam(); 3044 /* 3045 * There are two ways of getting at information in the EDT. 3046 * The first way is via the primary EDT tree. It starts 3047 * with a list of busses, then a list of targets on a bus, 3048 * then devices/luns on a target, and then peripherals on a 3049 * device/lun. The "other" way is by the peripheral driver 3050 * lists. The peripheral driver lists are organized by 3051 * peripheral driver. (obviously) So it makes sense to 3052 * use the peripheral driver list if the user is looking 3053 * for something like "da1", or all "da" devices. If the 3054 * user is looking for something on a particular bus/target 3055 * or lun, it's generally better to go through the EDT tree. 3056 */ 3057 3058 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3059 position_type = cdm->pos.position_type; 3060 else { 3061 int i; 3062 3063 position_type = CAM_DEV_POS_NONE; 3064 3065 for (i = 0; i < cdm->num_patterns; i++) { 3066 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3067 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3068 position_type = CAM_DEV_POS_EDT; 3069 break; 3070 } 3071 } 3072 3073 if (cdm->num_patterns == 0) 3074 position_type = CAM_DEV_POS_EDT; 3075 else if (position_type == CAM_DEV_POS_NONE) 3076 position_type = CAM_DEV_POS_PDRV; 3077 } 3078 3079 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3080 case CAM_DEV_POS_EDT: 3081 ret = xptedtmatch(cdm); 3082 break; 3083 case CAM_DEV_POS_PDRV: 3084 ret = xptperiphlistmatch(cdm); 3085 break; 3086 default: 3087 cdm->status = CAM_DEV_MATCH_ERROR; 3088 break; 3089 } 3090 3091 splx(s); 3092 3093 if (cdm->status == CAM_DEV_MATCH_ERROR) 3094 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3095 else 3096 start_ccb->ccb_h.status = CAM_REQ_CMP; 3097 3098 break; 3099 } 3100 case XPT_SASYNC_CB: 3101 { 3102 struct ccb_setasync *csa; 3103 struct async_node *cur_entry; 3104 struct async_list *async_head; 3105 u_int32_t added; 3106 int s; 3107 3108 csa = &start_ccb->csa; 3109 added = csa->event_enable; 3110 async_head = &csa->ccb_h.path->device->asyncs; 3111 3112 /* 3113 * If there is already an entry for us, simply 3114 * update it. 
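		 *
		 * An async handler is identified by the (callback,
		 * callback_arg) pair.  Re-registering an existing pair
		 * with event_enable == 0 removes the entry and drops the
		 * device reference that was taken when it was added.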
3115 */ 3116 s = splcam(); 3117 cur_entry = SLIST_FIRST(async_head); 3118 while (cur_entry != NULL) { 3119 if ((cur_entry->callback_arg == csa->callback_arg) 3120 && (cur_entry->callback == csa->callback)) 3121 break; 3122 cur_entry = SLIST_NEXT(cur_entry, links); 3123 } 3124 3125 if (cur_entry != NULL) { 3126 /* 3127 * If the request has no flags set, 3128 * remove the entry. 3129 */ 3130 added &= ~cur_entry->event_enable; 3131 if (csa->event_enable == 0) { 3132 SLIST_REMOVE(async_head, cur_entry, 3133 async_node, links); 3134 csa->ccb_h.path->device->refcount--; 3135 free(cur_entry, M_DEVBUF); 3136 } else { 3137 cur_entry->event_enable = csa->event_enable; 3138 } 3139 } else { 3140 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 3141 M_NOWAIT); 3142 if (cur_entry == NULL) { 3143 splx(s); 3144 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 3145 break; 3146 } 3147 cur_entry->event_enable = csa->event_enable; 3148 cur_entry->callback_arg = csa->callback_arg; 3149 cur_entry->callback = csa->callback; 3150 SLIST_INSERT_HEAD(async_head, cur_entry, links); 3151 csa->ccb_h.path->device->refcount++; 3152 } 3153 3154 if ((added & AC_FOUND_DEVICE) != 0) { 3155 /* 3156 * Get this peripheral up to date with all 3157 * the currently existing devices. 3158 */ 3159 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 3160 } 3161 if ((added & AC_PATH_REGISTERED) != 0) { 3162 /* 3163 * Get this peripheral up to date with all 3164 * the currently existing busses. 3165 */ 3166 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 3167 } 3168 splx(s); 3169 start_ccb->ccb_h.status = CAM_REQ_CMP; 3170 break; 3171 } 3172 case XPT_REL_SIMQ: 3173 { 3174 struct ccb_relsim *crs; 3175 struct cam_ed *dev; 3176 int s; 3177 3178 crs = &start_ccb->crs; 3179 dev = crs->ccb_h.path->device; 3180 if (dev == NULL) { 3181 3182 crs->ccb_h.status = CAM_DEV_NOT_THERE; 3183 break; 3184 } 3185 3186 s = splcam(); 3187 3188 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 3189 3190 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 3191 3192 /* Don't ever go below one opening */ 3193 if (crs->openings > 0) { 3194 xpt_dev_ccbq_resize(crs->ccb_h.path, 3195 crs->openings); 3196 3197 if (bootverbose) { 3198 xpt_print_path(crs->ccb_h.path); 3199 printf("tagged openings " 3200 "now %d\n", 3201 crs->openings); 3202 } 3203 } 3204 } 3205 } 3206 3207 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 3208 3209 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 3210 3211 /* 3212 * Just extend the old timeout and decrement 3213 * the freeze count so that a single timeout 3214 * is sufficient for releasing the queue. 3215 */ 3216 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3217 untimeout(xpt_release_devq_timeout, 3218 dev, dev->c_handle); 3219 } else { 3220 3221 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3222 } 3223 3224 dev->c_handle = 3225 timeout(xpt_release_devq_timeout, 3226 dev, 3227 (crs->release_timeout * hz) / 1000); 3228 3229 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 3230 3231 } 3232 3233 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 3234 3235 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 3236 /* 3237 * Decrement the freeze count so that a single 3238 * completion is still sufficient to unfreeze 3239 * the queue. 
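			 * (Whether CAM_DEV_QFREEZE is still set on this
			 * ccb at the end of the case decides if
			 * xpt_release_devq is called immediately or the
			 * release is left to the pending completion or
			 * timeout.)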
3240 */ 3241 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3242 } else { 3243 3244 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 3245 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3246 } 3247 } 3248 3249 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 3250 3251 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3252 || (dev->ccbq.dev_active == 0)) { 3253 3254 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3255 } else { 3256 3257 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3258 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3259 } 3260 } 3261 splx(s); 3262 3263 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3264 3265 xpt_release_devq(crs->ccb_h.path, /*count*/1, 3266 /*run_queue*/TRUE); 3267 } 3268 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3269 start_ccb->ccb_h.status = CAM_REQ_CMP; 3270 break; 3271 } 3272 case XPT_SCAN_BUS: 3273 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3274 break; 3275 case XPT_SCAN_LUN: 3276 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3277 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3278 start_ccb); 3279 break; 3280 case XPT_DEBUG: { 3281 #ifdef CAMDEBUG 3282 int s; 3283 3284 s = splcam(); 3285 #ifdef CAM_DEBUG_DELAY 3286 cam_debug_delay = CAM_DEBUG_DELAY; 3287 #endif 3288 cam_dflags = start_ccb->cdbg.flags; 3289 if (cam_dpath != NULL) { 3290 xpt_free_path(cam_dpath); 3291 cam_dpath = NULL; 3292 } 3293 3294 if (cam_dflags != CAM_DEBUG_NONE) { 3295 if (xpt_create_path(&cam_dpath, xpt_periph, 3296 start_ccb->ccb_h.path_id, 3297 start_ccb->ccb_h.target_id, 3298 start_ccb->ccb_h.target_lun) != 3299 CAM_REQ_CMP) { 3300 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3301 cam_dflags = CAM_DEBUG_NONE; 3302 } else { 3303 start_ccb->ccb_h.status = CAM_REQ_CMP; 3304 xpt_print_path(cam_dpath); 3305 printf("debugging flags now %x\n", cam_dflags); 3306 } 3307 } else { 3308 cam_dpath = NULL; 3309 start_ccb->ccb_h.status = CAM_REQ_CMP; 3310 } 3311 splx(s); 3312 #else /* !CAMDEBUG */ 3313 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3314 #endif /* CAMDEBUG */ 3315 break; 3316 } 3317 case XPT_NOOP: 3318 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3319 xpt_freeze_devq(start_ccb->ccb_h.path, 1); 3320 start_ccb->ccb_h.status = CAM_REQ_CMP; 3321 break; 3322 default: 3323 case XPT_SDEV_TYPE: 3324 case XPT_TERM_IO: 3325 case XPT_ENG_INQ: 3326 /* XXX Implement */ 3327 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3328 break; 3329 } 3330 splx(iopl); 3331 } 3332 3333 void 3334 xpt_polled_action(union ccb *start_ccb) 3335 { 3336 int s; 3337 u_int32_t timeout; 3338 struct cam_sim *sim; 3339 struct cam_devq *devq; 3340 struct cam_ed *dev; 3341 3342 timeout = start_ccb->ccb_h.timeout; 3343 sim = start_ccb->ccb_h.path->bus->sim; 3344 devq = sim->devq; 3345 dev = start_ccb->ccb_h.path->device; 3346 3347 s = splcam(); 3348 3349 /* 3350 * Steal an opening so that no other queued requests 3351 * can get it before us while we simulate interrupts. 
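	 *
	 * The openings are restored once polling completes.  Each pass
	 * through the loops below costs roughly a millisecond
	 * (DELAY(1000) is in microseconds), so ccb_h.timeout, which is
	 * in milliseconds, doubles as the iteration bound; sim_poll
	 * plus the swi handlers stand in for the normal interrupt path.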
3352 	 */
3353 	dev->ccbq.devq_openings--;
3354 	dev->ccbq.dev_openings--;
3355 
3356 	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3357 	   && (--timeout > 0)) {
3358 		DELAY(1000);
3359 		(*(sim->sim_poll))(sim);
3360 		swi_camnet();
3361 		swi_cambio();
3362 	}
3363 
3364 	dev->ccbq.devq_openings++;
3365 	dev->ccbq.dev_openings++;
3366 
3367 	if (timeout != 0) {
3368 		xpt_action(start_ccb);
3369 		while(--timeout > 0) {
3370 			(*(sim->sim_poll))(sim);
3371 			swi_camnet();
3372 			swi_cambio();
3373 			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3374 			    != CAM_REQ_INPROG)
3375 				break;
3376 			DELAY(1000);
3377 		}
3378 		if (timeout == 0) {
3379 			/*
3380 			 * XXX Is it worth adding a sim_timeout entry
3381 			 * point so we can attempt recovery?  If
3382 			 * this is only used for dumps, I don't think
3383 			 * it is.
3384 			 */
3385 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3386 		}
3387 	} else {
3388 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3389 	}
3390 	splx(s);
3391 }
3392 
3393 /*
3394  * Schedule a peripheral driver to receive a ccb when its
3395  * target device has space for more transactions.
3396  */
3397 void
3398 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3399 {
3400 	struct cam_ed *device;
3401 	int s;
3402 	int runq;
3403 
3404 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3405 	device = perph->path->device;
3406 	s = splsoftcam();
3407 	if (periph_is_queued(perph)) {
3408 		/* Simply reorder based on new priority */
3409 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3410 			  (" change priority to %d\n", new_priority));
3411 		if (new_priority < perph->pinfo.priority) {
3412 			camq_change_priority(&device->drvq,
3413 					     perph->pinfo.index,
3414 					     new_priority);
3415 		}
3416 		runq = 0;
3417 	} else {
3418 		/* New entry on the queue */
3419 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3420 			  (" added periph to queue\n"));
3421 		perph->pinfo.priority = new_priority;
3422 		perph->pinfo.generation = ++device->drvq.generation;
3423 		camq_insert(&device->drvq, &perph->pinfo);
3424 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3425 	}
3426 	splx(s);
3427 	if (runq != 0) {
3428 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3429 			  (" calling xpt_run_devq\n"));
3430 		xpt_run_dev_allocq(perph->path->bus);
3431 	}
3432 }
3433 
3434 
3435 /*
3436  * Schedule a device to run on a given queue.
3437  * If the device was inserted as a new entry on the queue,
3438  * return 1 meaning the device queue should be run.  If we
3439  * were already queued, implying someone else has already
3440  * started the queue, return 0 so the caller doesn't attempt
3441  * to run the queue.  Must be run at either splsoftcam
3442  * (or splcam since that encompasses splsoftcam).
3443  */
3444 static int
3445 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3446 		 u_int32_t new_priority)
3447 {
3448 	int retval;
3449 	u_int32_t old_priority;
3450 
3451 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3452 
3453 	old_priority = pinfo->priority;
3454 
3455 	/*
3456 	 * Are we already queued?
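	 * (pinfo->index holds CAM_UNQUEUED_INDEX when we are not.)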
3457 */ 3458 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3459 /* Simply reorder based on new priority */ 3460 if (new_priority < old_priority) { 3461 camq_change_priority(queue, pinfo->index, 3462 new_priority); 3463 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3464 ("changed priority to %d\n", 3465 new_priority)); 3466 } 3467 retval = 0; 3468 } else { 3469 /* New entry on the queue */ 3470 if (new_priority < old_priority) 3471 pinfo->priority = new_priority; 3472 3473 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3474 ("Inserting onto queue\n")); 3475 pinfo->generation = ++queue->generation; 3476 camq_insert(queue, pinfo); 3477 retval = 1; 3478 } 3479 return (retval); 3480 } 3481 3482 static void 3483 xpt_run_dev_allocq(struct cam_eb *bus) 3484 { 3485 struct cam_devq *devq; 3486 int s; 3487 3488 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3489 devq = bus->sim->devq; 3490 3491 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3492 (" qfrozen_cnt == 0x%x, entries == %d, " 3493 "openings == %d, active == %d\n", 3494 devq->alloc_queue.qfrozen_cnt, 3495 devq->alloc_queue.entries, 3496 devq->alloc_openings, 3497 devq->alloc_active)); 3498 3499 s = splsoftcam(); 3500 devq->alloc_queue.qfrozen_cnt++; 3501 while ((devq->alloc_queue.entries > 0) 3502 && (devq->alloc_openings > 0) 3503 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3504 struct cam_ed_qinfo *qinfo; 3505 struct cam_ed *device; 3506 union ccb *work_ccb; 3507 struct cam_periph *drv; 3508 struct camq *drvq; 3509 3510 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3511 CAMQ_HEAD); 3512 device = qinfo->device; 3513 3514 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3515 ("running device %p\n", device)); 3516 3517 drvq = &device->drvq; 3518 3519 #ifdef CAMDEBUG 3520 if (drvq->entries <= 0) { 3521 panic("xpt_run_dev_allocq: " 3522 "Device on queue without any work to do"); 3523 } 3524 #endif 3525 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3526 devq->alloc_openings--; 3527 devq->alloc_active++; 3528 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 3529 splx(s); 3530 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3531 drv->pinfo.priority); 3532 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3533 ("calling periph start\n")); 3534 drv->periph_start(drv, work_ccb); 3535 } else { 3536 /* 3537 * Malloc failure in alloc_ccb 3538 */ 3539 /* 3540 * XXX add us to a list to be run from free_ccb 3541 * if we don't have any ccbs active on this 3542 * device queue otherwise we may never get run 3543 * again. 3544 */ 3545 break; 3546 } 3547 3548 /* Raise IPL for possible insertion and test at top of loop */ 3549 s = splsoftcam(); 3550 3551 if (drvq->entries > 0) { 3552 /* We have more work. 
Attempt to reschedule */ 3553 xpt_schedule_dev_allocq(bus, device); 3554 } 3555 } 3556 devq->alloc_queue.qfrozen_cnt--; 3557 splx(s); 3558 } 3559 3560 static void 3561 xpt_run_dev_sendq(struct cam_eb *bus) 3562 { 3563 struct cam_devq *devq; 3564 int s; 3565 3566 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3567 3568 devq = bus->sim->devq; 3569 3570 s = splcam(); 3571 devq->send_queue.qfrozen_cnt++; 3572 splx(s); 3573 s = splsoftcam(); 3574 while ((devq->send_queue.entries > 0) 3575 && (devq->send_openings > 0)) { 3576 struct cam_ed_qinfo *qinfo; 3577 struct cam_ed *device; 3578 union ccb *work_ccb; 3579 struct cam_sim *sim; 3580 int ospl; 3581 3582 ospl = splcam(); 3583 if (devq->send_queue.qfrozen_cnt > 1) { 3584 splx(ospl); 3585 break; 3586 } 3587 3588 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3589 CAMQ_HEAD); 3590 device = qinfo->device; 3591 3592 /* 3593 * If the device has been "frozen", don't attempt 3594 * to run it. 3595 */ 3596 if (device->qfrozen_cnt > 0) { 3597 splx(ospl); 3598 continue; 3599 } 3600 3601 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3602 ("running device %p\n", device)); 3603 3604 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3605 if (work_ccb == NULL) { 3606 printf("device on run queue with no ccbs???"); 3607 splx(ospl); 3608 continue; 3609 } 3610 3611 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3612 3613 if (num_highpower <= 0) { 3614 /* 3615 * We got a high power command, but we 3616 * don't have any available slots. Freeze 3617 * the device queue until we have a slot 3618 * available. 3619 */ 3620 device->qfrozen_cnt++; 3621 STAILQ_INSERT_TAIL(&highpowerq, 3622 &work_ccb->ccb_h, 3623 xpt_links.stqe); 3624 3625 splx(ospl); 3626 continue; 3627 } else { 3628 /* 3629 * Consume a high power slot while 3630 * this ccb runs. 3631 */ 3632 num_highpower--; 3633 } 3634 } 3635 devq->active_dev = device; 3636 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3637 3638 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3639 splx(ospl); 3640 3641 devq->send_openings--; 3642 devq->send_active++; 3643 3644 if (device->ccbq.queue.entries > 0) 3645 xpt_schedule_dev_sendq(bus, device); 3646 3647 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3648 /* 3649 * The client wants to freeze the queue 3650 * after this CCB is sent. 3651 */ 3652 ospl = splcam(); 3653 device->qfrozen_cnt++; 3654 splx(ospl); 3655 } 3656 3657 splx(s); 3658 3659 if ((device->inq_flags & SID_CmdQue) != 0) 3660 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3661 else 3662 /* 3663 * Clear this in case of a retried CCB that failed 3664 * due to a rejected tag. 3665 */ 3666 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3667 3668 /* 3669 * Device queues can be shared among multiple sim instances 3670 * that reside on different busses. Use the SIM in the queue 3671 * CCB's path, rather than the one in the bus that was passed 3672 * into this function. 3673 */ 3674 sim = work_ccb->ccb_h.path->bus->sim; 3675 (*(sim->sim_action))(sim, work_ccb); 3676 3677 ospl = splcam(); 3678 devq->active_dev = NULL; 3679 splx(ospl); 3680 /* Raise IPL for possible insertion and test at top of loop */ 3681 s = splsoftcam(); 3682 } 3683 splx(s); 3684 s = splcam(); 3685 devq->send_queue.qfrozen_cnt--; 3686 splx(s); 3687 } 3688 3689 /* 3690 * This function merges stuff from the slave ccb into the master ccb, while 3691 * keeping important fields in the master ccb constant. 
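 *
 * "Important" here means the header fields that tie the master ccb
 * into the XPT's queues and path: only retry_count, func_code,
 * timeout and flags are taken from the slave's header, after which
 * everything following the ccb_hdr is copied wholesale.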
3692 */ 3693 void 3694 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3695 { 3696 /* 3697 * Pull fields that are valid for peripheral drivers to set 3698 * into the master CCB along with the CCB "payload". 3699 */ 3700 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3701 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3702 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3703 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3704 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3705 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3706 } 3707 3708 void 3709 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3710 { 3711 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3712 ccb_h->pinfo.priority = priority; 3713 ccb_h->path = path; 3714 ccb_h->path_id = path->bus->path_id; 3715 if (path->target) 3716 ccb_h->target_id = path->target->target_id; 3717 else 3718 ccb_h->target_id = CAM_TARGET_WILDCARD; 3719 if (path->device) { 3720 ccb_h->target_lun = path->device->lun_id; 3721 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3722 } else { 3723 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3724 } 3725 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3726 ccb_h->flags = 0; 3727 } 3728 3729 /* Path manipulation functions */ 3730 cam_status 3731 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3732 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3733 { 3734 struct cam_path *path; 3735 cam_status status; 3736 3737 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT); 3738 3739 if (path == NULL) { 3740 status = CAM_RESRC_UNAVAIL; 3741 return(status); 3742 } 3743 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3744 if (status != CAM_REQ_CMP) { 3745 free(path, M_DEVBUF); 3746 path = NULL; 3747 } 3748 *new_path_ptr = path; 3749 return (status); 3750 } 3751 3752 static cam_status 3753 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3754 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3755 { 3756 struct cam_eb *bus; 3757 struct cam_et *target; 3758 struct cam_ed *device; 3759 cam_status status; 3760 int s; 3761 3762 status = CAM_REQ_CMP; /* Completed without error */ 3763 target = NULL; /* Wildcarded */ 3764 device = NULL; /* Wildcarded */ 3765 3766 /* 3767 * We will potentially modify the EDT, so block interrupts 3768 * that may attempt to create cam paths. 3769 */ 3770 s = splcam(); 3771 bus = xpt_find_bus(path_id); 3772 if (bus == NULL) { 3773 status = CAM_PATH_INVALID; 3774 } else { 3775 target = xpt_find_target(bus, target_id); 3776 if (target == NULL) { 3777 /* Create one */ 3778 struct cam_et *new_target; 3779 3780 new_target = xpt_alloc_target(bus, target_id); 3781 if (new_target == NULL) { 3782 status = CAM_RESRC_UNAVAIL; 3783 } else { 3784 target = new_target; 3785 } 3786 } 3787 if (target != NULL) { 3788 device = xpt_find_device(target, lun_id); 3789 if (device == NULL) { 3790 /* Create one */ 3791 struct cam_ed *new_device; 3792 3793 new_device = xpt_alloc_device(bus, 3794 target, 3795 lun_id); 3796 if (new_device == NULL) { 3797 status = CAM_RESRC_UNAVAIL; 3798 } else { 3799 device = new_device; 3800 } 3801 } 3802 } 3803 } 3804 splx(s); 3805 3806 /* 3807 * Only touch the user's data if we are successful. 
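	 *
	 * On failure, every reference taken above is given back in
	 * reverse order: the lookup or creation of the device, target
	 * and bus each left a reference that the xpt_release_*() calls
	 * below undo.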
3808 */ 3809 if (status == CAM_REQ_CMP) { 3810 new_path->periph = perph; 3811 new_path->bus = bus; 3812 new_path->target = target; 3813 new_path->device = device; 3814 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3815 } else { 3816 if (device != NULL) 3817 xpt_release_device(bus, target, device); 3818 if (target != NULL) 3819 xpt_release_target(bus, target); 3820 if (bus != NULL) 3821 xpt_release_bus(bus); 3822 } 3823 return (status); 3824 } 3825 3826 static void 3827 xpt_release_path(struct cam_path *path) 3828 { 3829 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3830 if (path->device != NULL) { 3831 xpt_release_device(path->bus, path->target, path->device); 3832 path->device = NULL; 3833 } 3834 if (path->target != NULL) { 3835 xpt_release_target(path->bus, path->target); 3836 path->target = NULL; 3837 } 3838 if (path->bus != NULL) { 3839 xpt_release_bus(path->bus); 3840 path->bus = NULL; 3841 } 3842 } 3843 3844 void 3845 xpt_free_path(struct cam_path *path) 3846 { 3847 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3848 xpt_release_path(path); 3849 free(path, M_DEVBUF); 3850 } 3851 3852 3853 /* 3854 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3855 * in path1, 2 for match with wildcards in path2. 3856 */ 3857 int 3858 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3859 { 3860 int retval = 0; 3861 3862 if (path1->bus != path2->bus) { 3863 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3864 retval = 1; 3865 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3866 retval = 2; 3867 else 3868 return (-1); 3869 } 3870 if (path1->target != path2->target) { 3871 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3872 if (retval == 0) 3873 retval = 1; 3874 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3875 retval = 2; 3876 else 3877 return (-1); 3878 } 3879 if (path1->device != path2->device) { 3880 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3881 if (retval == 0) 3882 retval = 1; 3883 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3884 retval = 2; 3885 else 3886 return (-1); 3887 } 3888 return (retval); 3889 } 3890 3891 void 3892 xpt_print_path(struct cam_path *path) 3893 { 3894 if (path == NULL) 3895 printf("(nopath): "); 3896 else { 3897 if (path->periph != NULL) 3898 printf("(%s%d:", path->periph->periph_name, 3899 path->periph->unit_number); 3900 else 3901 printf("(noperiph:"); 3902 3903 if (path->bus != NULL) 3904 printf("%s%d:%d:", path->bus->sim->sim_name, 3905 path->bus->sim->unit_number, 3906 path->bus->sim->bus_id); 3907 else 3908 printf("nobus:"); 3909 3910 if (path->target != NULL) 3911 printf("%d:", path->target->target_id); 3912 else 3913 printf("X:"); 3914 3915 if (path->device != NULL) 3916 printf("%d): ", path->device->lun_id); 3917 else 3918 printf("X): "); 3919 } 3920 } 3921 3922 path_id_t 3923 xpt_path_path_id(struct cam_path *path) 3924 { 3925 return(path->bus->path_id); 3926 } 3927 3928 target_id_t 3929 xpt_path_target_id(struct cam_path *path) 3930 { 3931 if (path->target != NULL) 3932 return (path->target->target_id); 3933 else 3934 return (CAM_TARGET_WILDCARD); 3935 } 3936 3937 lun_id_t 3938 xpt_path_lun_id(struct cam_path *path) 3939 { 3940 if (path->device != NULL) 3941 return (path->device->lun_id); 3942 else 3943 return (CAM_LUN_WILDCARD); 3944 } 3945 3946 struct cam_sim * 3947 xpt_path_sim(struct cam_path *path) 3948 { 3949 return (path->bus->sim); 3950 } 3951 3952 struct cam_periph* 3953 xpt_path_periph(struct cam_path *path) 3954 { 3955 return (path->periph); 
3956 }
3957 
3958 /*
3959  * Release a CAM control block for the caller.  Remit the cost of the structure
3960  * to the device referenced by the path.  If this device had no 'credits'
3961  * and peripheral drivers have registered async callbacks for this notification,
3962  * call them now.
3963  */
3964 void
3965 xpt_release_ccb(union ccb *free_ccb)
3966 {
3967 	int s;
3968 	struct cam_path *path;
3969 	struct cam_ed *device;
3970 	struct cam_eb *bus;
3971 
3972 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3973 	path = free_ccb->ccb_h.path;
3974 	device = path->device;
3975 	bus = path->bus;
3976 	s = splsoftcam();
3977 	cam_ccbq_release_opening(&device->ccbq);
3978 	if (xpt_ccb_count > xpt_max_ccbs) {
3979 		xpt_free_ccb(free_ccb);
3980 		xpt_ccb_count--;
3981 	} else {
3982 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3983 	}
3984 	bus->sim->devq->alloc_openings++;
3985 	bus->sim->devq->alloc_active--;
3986 	/* XXX Turn this into an inline function - xpt_run_device?? */
3987 	if ((device_is_alloc_queued(device) == 0)
3988 	 && (device->drvq.entries > 0)) {
3989 		xpt_schedule_dev_allocq(bus, device);
3990 	}
3991 	splx(s);
3992 	if (dev_allocq_is_runnable(bus->sim->devq))
3993 		xpt_run_dev_allocq(bus);
3994 }
3995 
3996 /* Functions accessed by SIM drivers */
3997 
3998 /*
3999  * A sim structure, listing the SIM entry points and instance
4000  * identification info is passed to xpt_bus_register to hook the SIM
4001  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4002  * for this new bus and places it in the array of busses and assigns
4003  * it a path_id.  The path_id may be influenced by "hard wiring"
4004  * information specified by the user.  Once interrupt services are
4005  * available, the bus will be probed.
4006  */
4007 int32_t
4008 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4009 {
4010 	struct cam_eb *new_bus;
4011 	struct cam_eb *old_bus;
4012 	struct ccb_pathinq cpi;
4013 	int s;
4014 
4015 	sim->bus_id = bus;
4016 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4017 					  M_DEVBUF, M_NOWAIT);
4018 	if (new_bus == NULL) {
4019 		/* Couldn't satisfy request */
4020 		return (CAM_RESRC_UNAVAIL);
4021 	}
4022 
4023 	if (strcmp(sim->sim_name, "xpt") != 0) {
4024 
4025 		sim->path_id =
4026 		    xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4027 	}
4028 
4029 	TAILQ_INIT(&new_bus->et_entries);
4030 	new_bus->path_id = sim->path_id;
4031 	new_bus->sim = sim;
4032 	timevalclear(&new_bus->last_reset);
4033 	new_bus->flags = 0;
4034 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4035 	new_bus->generation = 0;
4036 	s = splcam();
4037 	old_bus = TAILQ_FIRST(&xpt_busses);
4038 	while (old_bus != NULL
4039 	    && old_bus->path_id < new_bus->path_id)
4040 		old_bus = TAILQ_NEXT(old_bus, links);
4041 	if (old_bus != NULL)
4042 		TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4043 	else
4044 		TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4045 	bus_generation++;
4046 	splx(s);
4047 
4048 	/* Notify interested parties */
4049 	if (sim->path_id != CAM_XPT_PATH_ID) {
4050 		struct cam_path path;
4051 
4052 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4053 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4054 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4055 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4056 		xpt_action((union ccb *)&cpi);
4057 		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4058 		xpt_release_path(&path);
4059 	}
4060 	return (CAM_SUCCESS);
4061 }
4062 
4063 int32_t
4064 xpt_bus_deregister(path_id_t pathid)
4065 {
4066 	struct cam_path bus_path;
4067 	cam_status status;
4068 
4069 	status =
xpt_compile_path(&bus_path, NULL, pathid, 4070 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 4071 if (status != CAM_REQ_CMP) 4072 return (status); 4073 4074 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 4075 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 4076 4077 /* Release the reference count held while registered. */ 4078 xpt_release_bus(bus_path.bus); 4079 xpt_release_path(&bus_path); 4080 4081 return (CAM_REQ_CMP); 4082 } 4083 4084 static path_id_t 4085 xptnextfreepathid(void) 4086 { 4087 struct cam_eb *bus; 4088 path_id_t pathid; 4089 char *strval; 4090 4091 pathid = 0; 4092 bus = TAILQ_FIRST(&xpt_busses); 4093 retry: 4094 /* Find an unoccupied pathid */ 4095 while (bus != NULL 4096 && bus->path_id <= pathid) { 4097 if (bus->path_id == pathid) 4098 pathid++; 4099 bus = TAILQ_NEXT(bus, links); 4100 } 4101 4102 /* 4103 * Ensure that this pathid is not reserved for 4104 * a bus that may be registered in the future. 4105 */ 4106 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4107 ++pathid; 4108 /* Start the search over */ 4109 goto retry; 4110 } 4111 return (pathid); 4112 } 4113 4114 static path_id_t 4115 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4116 { 4117 path_id_t pathid; 4118 int i, dunit, val; 4119 char buf[32], *strval; 4120 4121 pathid = CAM_XPT_PATH_ID; 4122 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4123 i = -1; 4124 while ((i = resource_locate(i, "scbus")) != -1) { 4125 dunit = resource_query_unit(i); 4126 if (dunit < 0) /* unwired?! */ 4127 continue; 4128 if (resource_string_value("scbus", dunit, "at", &strval) != 0) 4129 continue; 4130 if (strcmp(buf, strval) != 0) 4131 continue; 4132 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4133 if (sim_bus == val) { 4134 pathid = dunit; 4135 break; 4136 } 4137 } else if (sim_bus == 0) { 4138 /* Unspecified matches bus 0 */ 4139 pathid = dunit; 4140 break; 4141 } else { 4142 printf("Ambiguous scbus configuration for %s%d " 4143 "bus %d, cannot wire down. The kernel " 4144 "config entry for scbus%d should " 4145 "specify a controller bus.\n" 4146 "Scbus will be assigned dynamically.\n", 4147 sim_name, sim_unit, sim_bus, dunit); 4148 break; 4149 } 4150 } 4151 4152 if (pathid == CAM_XPT_PATH_ID) 4153 pathid = xptnextfreepathid(); 4154 return (pathid); 4155 } 4156 4157 void 4158 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4159 { 4160 struct cam_eb *bus; 4161 struct cam_et *target, *next_target; 4162 struct cam_ed *device, *next_device; 4163 int s; 4164 4165 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 4166 4167 /* 4168 * Most async events come from a CAM interrupt context. In 4169 * a few cases, the error recovery code at the peripheral layer, 4170 * which may run from our SWI or a process context, may signal 4171 * deferred events with a call to xpt_async. Ensure async 4172 * notifications are serialized by blocking cam interrupts. 
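 *
 * For reference, a client arranges to receive these notifications by
 * handing an XPT_SASYNC_CB ccb to xpt_action.  A minimal sketch, assuming
 * the ccb_setasync layout from cam_ccb.h of this vintage (the "mydrv"
 * names are hypothetical):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = mydrvasync;
 *	csa.callback_arg = mydrvsoftc;
 *	xpt_action((union ccb *)&csa);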
4173 */ 4174 s = splcam(); 4175 4176 bus = path->bus; 4177 4178 if (async_code == AC_BUS_RESET) { 4179 int s; 4180 4181 s = splclock(); 4182 /* Update our notion of when the last reset occurred */ 4183 microtime(&bus->last_reset); 4184 splx(s); 4185 } 4186 4187 for (target = TAILQ_FIRST(&bus->et_entries); 4188 target != NULL; 4189 target = next_target) { 4190 4191 next_target = TAILQ_NEXT(target, links); 4192 4193 if (path->target != target 4194 && path->target->target_id != CAM_TARGET_WILDCARD) 4195 continue; 4196 4197 if (async_code == AC_SENT_BDR) { 4198 int s; 4199 4200 /* Update our notion of when the last reset occurred */ 4201 s = splclock(); 4202 microtime(&path->target->last_reset); 4203 splx(s); 4204 } 4205 4206 for (device = TAILQ_FIRST(&target->ed_entries); 4207 device != NULL; 4208 device = next_device) { 4209 cam_status status; 4210 struct cam_path newpath; 4211 4212 next_device = TAILQ_NEXT(device, links); 4213 4214 if (path->device != device 4215 && path->device->lun_id != CAM_LUN_WILDCARD) 4216 continue; 4217 4218 /* 4219 * We need our own path with wildcards expanded to 4220 * handle certain types of events. 4221 */ 4222 if ((async_code == AC_SENT_BDR) 4223 || (async_code == AC_BUS_RESET) 4224 || (async_code == AC_INQ_CHANGED)) 4225 status = xpt_compile_path(&newpath, NULL, 4226 bus->path_id, 4227 target->target_id, 4228 device->lun_id); 4229 else 4230 status = CAM_REQ_CMP_ERR; 4231 4232 if (status == CAM_REQ_CMP) { 4233 4234 /* 4235 * Allow transfer negotiation to occur in a 4236 * tag-free environment. 4237 */ 4238 if (async_code == AC_SENT_BDR 4239 || async_code == AC_BUS_RESET) 4240 xpt_toggle_tags(&newpath); 4241 4242 if (async_code == AC_INQ_CHANGED) { 4243 /* 4244 * We've sent a start unit command, or 4245 * something similar, to a device that 4246 * may have caused its inquiry data to 4247 * change. So we re-scan the device to 4248 * refresh the inquiry data for it. 4249 */ 4250 xpt_scan_lun(newpath.periph, &newpath, 4251 CAM_EXPECT_INQ_CHANGE, 4252 NULL); 4253 } 4254 xpt_release_path(&newpath); 4255 } else if (async_code == AC_LOST_DEVICE) { 4256 device->flags |= CAM_DEV_UNCONFIGURED; 4257 } else if (async_code == AC_TRANSFER_NEG) { 4258 struct ccb_trans_settings *settings; 4259 4260 settings = 4261 (struct ccb_trans_settings *)async_arg; 4262 xpt_set_transfer_settings(settings, device, 4263 /*async_update*/TRUE); 4264 } 4265 4266 xpt_async_bcast(&device->asyncs, 4267 async_code, 4268 path, 4269 async_arg); 4270 } 4271 } 4272 4273 /* 4274 * If this wasn't a fully wildcarded async, tell all 4275 * clients that want all async events. 4276 */ 4277 if (bus != xpt_periph->path->bus) 4278 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, 4279 path, async_arg); 4280 splx(s); 4281 } 4282 4283 static void 4284 xpt_async_bcast(struct async_list *async_head, 4285 u_int32_t async_code, 4286 struct cam_path *path, void *async_arg) 4287 { 4288 struct async_node *cur_entry; 4289 4290 cur_entry = SLIST_FIRST(async_head); 4291 while (cur_entry != NULL) { 4292 struct async_node *next_entry; 4293 /* 4294 * Grab the next list entry before we call the current 4295 * entry's callback. This is because the callback function 4296 * can delete its async callback entry. 
4297 */ 4298 next_entry = SLIST_NEXT(cur_entry, links); 4299 if ((cur_entry->event_enable & async_code) != 0) 4300 cur_entry->callback(cur_entry->callback_arg, 4301 async_code, path, 4302 async_arg); 4303 cur_entry = next_entry; 4304 } 4305 } 4306 4307 u_int32_t 4308 xpt_freeze_devq(struct cam_path *path, u_int count) 4309 { 4310 int s; 4311 struct ccb_hdr *ccbh; 4312 4313 s = splcam(); 4314 path->device->qfrozen_cnt += count; 4315 4316 /* 4317 * Mark the last CCB in the queue as needing 4318 * to be requeued if the driver hasn't 4319 * changed it's state yet. This fixes a race 4320 * where a ccb is just about to be queued to 4321 * a controller driver when it's interrupt routine 4322 * freezes the queue. To completly close the 4323 * hole, controller drives must check to see 4324 * if a ccb's status is still CAM_REQ_INPROG 4325 * under spl protection just before they queue 4326 * the CCB. See ahc_action/ahc_freeze_devq for 4327 * an example. 4328 */ 4329 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq); 4330 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4331 ccbh->status = CAM_REQUEUE_REQ; 4332 splx(s); 4333 return (path->device->qfrozen_cnt); 4334 } 4335 4336 u_int32_t 4337 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4338 { 4339 sim->devq->send_queue.qfrozen_cnt += count; 4340 if (sim->devq->active_dev != NULL) { 4341 struct ccb_hdr *ccbh; 4342 4343 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs, 4344 ccb_hdr_tailq); 4345 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4346 ccbh->status = CAM_REQUEUE_REQ; 4347 } 4348 return (sim->devq->send_queue.qfrozen_cnt); 4349 } 4350 4351 static void 4352 xpt_release_devq_timeout(void *arg) 4353 { 4354 struct cam_ed *device; 4355 4356 device = (struct cam_ed *)arg; 4357 4358 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE); 4359 } 4360 4361 void 4362 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4363 { 4364 xpt_release_devq_device(path->device, count, run_queue); 4365 } 4366 4367 static void 4368 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4369 { 4370 int rundevq; 4371 int s0, s1; 4372 4373 rundevq = 0; 4374 s0 = splsoftcam(); 4375 s1 = splcam(); 4376 if (dev->qfrozen_cnt > 0) { 4377 4378 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count; 4379 dev->qfrozen_cnt -= count; 4380 if (dev->qfrozen_cnt == 0) { 4381 4382 /* 4383 * No longer need to wait for a successful 4384 * command completion. 4385 */ 4386 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4387 4388 /* 4389 * Remove any timeouts that might be scheduled 4390 * to release this queue. 4391 */ 4392 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4393 untimeout(xpt_release_devq_timeout, dev, 4394 dev->c_handle); 4395 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4396 } 4397 4398 /* 4399 * Now that we are unfrozen schedule the 4400 * device so any pending transactions are 4401 * run. 
4402 */ 4403 if ((dev->ccbq.queue.entries > 0) 4404 && (xpt_schedule_dev_sendq(dev->target->bus, dev)) 4405 && (run_queue != 0)) { 4406 rundevq = 1; 4407 } 4408 } 4409 } 4410 splx(s1); 4411 if (rundevq != 0) 4412 xpt_run_dev_sendq(dev->target->bus); 4413 splx(s0); 4414 } 4415 4416 void 4417 xpt_release_simq(struct cam_sim *sim, int run_queue) 4418 { 4419 int s; 4420 struct camq *sendq; 4421 4422 sendq = &(sim->devq->send_queue); 4423 s = splcam(); 4424 if (sendq->qfrozen_cnt > 0) { 4425 4426 sendq->qfrozen_cnt--; 4427 if (sendq->qfrozen_cnt == 0) { 4428 struct cam_eb *bus; 4429 4430 /* 4431 * If there is a timeout scheduled to release this 4432 * sim queue, remove it. The queue frozen count is 4433 * already at 0. 4434 */ 4435 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4436 untimeout(xpt_release_simq_timeout, sim, 4437 sim->c_handle); 4438 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4439 } 4440 bus = xpt_find_bus(sim->path_id); 4441 splx(s); 4442 4443 if (run_queue) { 4444 /* 4445 * Now that we are unfrozen run the send queue. 4446 */ 4447 xpt_run_dev_sendq(bus); 4448 } 4449 xpt_release_bus(bus); 4450 } else 4451 splx(s); 4452 } else 4453 splx(s); 4454 } 4455 4456 static void 4457 xpt_release_simq_timeout(void *arg) 4458 { 4459 struct cam_sim *sim; 4460 4461 sim = (struct cam_sim *)arg; 4462 xpt_release_simq(sim, /* run_queue */ TRUE); 4463 } 4464 4465 void 4466 xpt_done(union ccb *done_ccb) 4467 { 4468 int s; 4469 4470 s = splcam(); 4471 4472 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); 4473 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { 4474 /* 4475 * Queue up the request for handling by our SWI handler 4476 * any of the "non-immediate" type of ccbs. 4477 */ 4478 switch (done_ccb->ccb_h.path->periph->type) { 4479 case CAM_PERIPH_BIO: 4480 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h, 4481 sim_links.tqe); 4482 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4483 setsoftcambio(); 4484 break; 4485 case CAM_PERIPH_NET: 4486 TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h, 4487 sim_links.tqe); 4488 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4489 setsoftcamnet(); 4490 break; 4491 } 4492 } 4493 splx(s); 4494 } 4495 4496 union ccb * 4497 xpt_alloc_ccb() 4498 { 4499 union ccb *new_ccb; 4500 4501 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK); 4502 return (new_ccb); 4503 } 4504 4505 void 4506 xpt_free_ccb(union ccb *free_ccb) 4507 { 4508 free(free_ccb, M_DEVBUF); 4509 } 4510 4511 4512 4513 /* Private XPT functions */ 4514 4515 /* 4516 * Get a CAM control block for the caller. Charge the structure to the device 4517 * referenced by the path. If the this device has no 'credits' then the 4518 * device already has the maximum number of outstanding operations under way 4519 * and we return NULL. If we don't have sufficient resources to allocate more 4520 * ccbs, we also return NULL. 
4521 */ 4522 static union ccb * 4523 xpt_get_ccb(struct cam_ed *device) 4524 { 4525 union ccb *new_ccb; 4526 int s; 4527 4528 s = splsoftcam(); 4529 if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) { 4530 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT); 4531 if (new_ccb == NULL) { 4532 splx(s); 4533 return (NULL); 4534 } 4535 callout_handle_init(&new_ccb->ccb_h.timeout_ch); 4536 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h, 4537 xpt_links.sle); 4538 xpt_ccb_count++; 4539 } 4540 cam_ccbq_take_opening(&device->ccbq); 4541 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle); 4542 splx(s); 4543 return (new_ccb); 4544 } 4545 4546 static void 4547 xpt_release_bus(struct cam_eb *bus) 4548 { 4549 int s; 4550 4551 s = splcam(); 4552 if ((--bus->refcount == 0) 4553 && (TAILQ_FIRST(&bus->et_entries) == NULL)) { 4554 TAILQ_REMOVE(&xpt_busses, bus, links); 4555 bus_generation++; 4556 splx(s); 4557 free(bus, M_DEVBUF); 4558 } else 4559 splx(s); 4560 } 4561 4562 static struct cam_et * 4563 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4564 { 4565 struct cam_et *target; 4566 4567 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT); 4568 if (target != NULL) { 4569 struct cam_et *cur_target; 4570 4571 TAILQ_INIT(&target->ed_entries); 4572 target->bus = bus; 4573 target->target_id = target_id; 4574 target->refcount = 1; 4575 target->generation = 0; 4576 timevalclear(&target->last_reset); 4577 /* 4578 * Hold a reference to our parent bus so it 4579 * will not go away before we do. 4580 */ 4581 bus->refcount++; 4582 4583 /* Insertion sort into our bus's target list */ 4584 cur_target = TAILQ_FIRST(&bus->et_entries); 4585 while (cur_target != NULL && cur_target->target_id < target_id) 4586 cur_target = TAILQ_NEXT(cur_target, links); 4587 4588 if (cur_target != NULL) { 4589 TAILQ_INSERT_BEFORE(cur_target, target, links); 4590 } else { 4591 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4592 } 4593 bus->generation++; 4594 } 4595 return (target); 4596 } 4597 4598 static void 4599 xpt_release_target(struct cam_eb *bus, struct cam_et *target) 4600 { 4601 int s; 4602 4603 s = splcam(); 4604 if ((--target->refcount == 0) 4605 && (TAILQ_FIRST(&target->ed_entries) == NULL)) { 4606 TAILQ_REMOVE(&bus->et_entries, target, links); 4607 bus->generation++; 4608 splx(s); 4609 free(target, M_DEVBUF); 4610 xpt_release_bus(bus); 4611 } else 4612 splx(s); 4613 } 4614 4615 static struct cam_ed * 4616 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4617 { 4618 struct cam_ed *device; 4619 struct cam_devq *devq; 4620 cam_status status; 4621 4622 /* Make space for us in the device queue on our bus */ 4623 devq = bus->sim->devq; 4624 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); 4625 4626 if (status != CAM_REQ_CMP) { 4627 device = NULL; 4628 } else { 4629 device = (struct cam_ed *)malloc(sizeof(*device), 4630 M_DEVBUF, M_NOWAIT); 4631 } 4632 4633 if (device != NULL) { 4634 struct cam_ed *cur_device; 4635 4636 cam_init_pinfo(&device->alloc_ccb_entry.pinfo); 4637 device->alloc_ccb_entry.device = device; 4638 cam_init_pinfo(&device->send_ccb_entry.pinfo); 4639 device->send_ccb_entry.device = device; 4640 device->target = target; 4641 device->lun_id = lun_id; 4642 /* Initialize our queues */ 4643 if (camq_init(&device->drvq, 0) != 0) { 4644 free(device, M_DEVBUF); 4645 return (NULL); 4646 } 4647 if (cam_ccbq_init(&device->ccbq, 4648 bus->sim->max_dev_openings) != 0) { 4649 camq_fini(&device->drvq); 4650 free(device, M_DEVBUF); 4651 return (NULL); 
4652 } 4653 SLIST_INIT(&device->asyncs); 4654 SLIST_INIT(&device->periphs); 4655 device->generation = 0; 4656 device->owner = NULL; 4657 /* 4658 * Take the default quirk entry until we have inquiry 4659 * data and can determine a better quirk to use. 4660 */ 4661 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1]; 4662 bzero(&device->inq_data, sizeof(device->inq_data)); 4663 device->inq_flags = 0; 4664 device->queue_flags = 0; 4665 device->serial_num = NULL; 4666 device->serial_num_len = 0; 4667 device->qfrozen_cnt = 0; 4668 device->flags = CAM_DEV_UNCONFIGURED; 4669 device->tag_delay_count = 0; 4670 device->refcount = 1; 4671 callout_handle_init(&device->c_handle); 4672 4673 /* 4674 * Hold a reference to our parent target so it 4675 * will not go away before we do. 4676 */ 4677 target->refcount++; 4678 4679 /* 4680 * XXX should be limited by number of CCBs this bus can 4681 * do. 4682 */ 4683 xpt_max_ccbs += device->ccbq.devq_openings; 4684 /* Insertion sort into our target's device list */ 4685 cur_device = TAILQ_FIRST(&target->ed_entries); 4686 while (cur_device != NULL && cur_device->lun_id < lun_id) 4687 cur_device = TAILQ_NEXT(cur_device, links); 4688 if (cur_device != NULL) { 4689 TAILQ_INSERT_BEFORE(cur_device, device, links); 4690 } else { 4691 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4692 } 4693 target->generation++; 4694 } 4695 return (device); 4696 } 4697 4698 static void 4699 xpt_release_device(struct cam_eb *bus, struct cam_et *target, 4700 struct cam_ed *device) 4701 { 4702 int s; 4703 4704 s = splcam(); 4705 if ((--device->refcount == 0) 4706 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) { 4707 struct cam_devq *devq; 4708 4709 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX 4710 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) 4711 panic("Removing device while still queued for ccbs"); 4712 4713 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4714 untimeout(xpt_release_devq_timeout, device, 4715 device->c_handle); 4716 4717 TAILQ_REMOVE(&target->ed_entries, device,links); 4718 target->generation++; 4719 xpt_max_ccbs -= device->ccbq.devq_openings; 4720 /* Release our slot in the devq */ 4721 devq = bus->sim->devq; 4722 cam_devq_resize(devq, devq->alloc_queue.array_size - 1); 4723 splx(s); 4724 free(device, M_DEVBUF); 4725 xpt_release_target(bus, target); 4726 } else 4727 splx(s); 4728 } 4729 4730 static u_int32_t 4731 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4732 { 4733 int s; 4734 int diff; 4735 int result; 4736 struct cam_ed *dev; 4737 4738 dev = path->device; 4739 s = splsoftcam(); 4740 4741 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 4742 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4743 if (result == CAM_REQ_CMP && (diff < 0)) { 4744 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 4745 } 4746 /* Adjust the global limit */ 4747 xpt_max_ccbs += diff; 4748 splx(s); 4749 return (result); 4750 } 4751 4752 static struct cam_eb * 4753 xpt_find_bus(path_id_t path_id) 4754 { 4755 struct cam_eb *bus; 4756 4757 for (bus = TAILQ_FIRST(&xpt_busses); 4758 bus != NULL; 4759 bus = TAILQ_NEXT(bus, links)) { 4760 if (bus->path_id == path_id) { 4761 bus->refcount++; 4762 break; 4763 } 4764 } 4765 return (bus); 4766 } 4767 4768 static struct cam_et * 4769 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4770 { 4771 struct cam_et *target; 4772 4773 for (target = TAILQ_FIRST(&bus->et_entries); 4774 target != NULL; 4775 target = TAILQ_NEXT(target, links)) { 4776 if 
(target->target_id == target_id) { 4777 target->refcount++; 4778 break; 4779 } 4780 } 4781 return (target); 4782 } 4783 4784 static struct cam_ed * 4785 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4786 { 4787 struct cam_ed *device; 4788 4789 for (device = TAILQ_FIRST(&target->ed_entries); 4790 device != NULL; 4791 device = TAILQ_NEXT(device, links)) { 4792 if (device->lun_id == lun_id) { 4793 device->refcount++; 4794 break; 4795 } 4796 } 4797 return (device); 4798 } 4799 4800 typedef struct { 4801 union ccb *request_ccb; 4802 struct ccb_pathinq *cpi; 4803 int pending_count; 4804 } xpt_scan_bus_info; 4805 4806 /* 4807 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 4808 * As the scan progresses, xpt_scan_bus is used as the 4809 * completion callback function. 4810 */ 4811 static void 4812 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 4813 { 4814 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4815 ("xpt_scan_bus\n")); 4816 switch (request_ccb->ccb_h.func_code) { 4817 case XPT_SCAN_BUS: 4818 { 4819 xpt_scan_bus_info *scan_info; 4820 union ccb *work_ccb; 4821 struct cam_path *path; 4822 u_int i; 4823 u_int max_target; 4824 u_int initiator_id; 4825 4826 /* Find out the characteristics of the bus */ 4827 work_ccb = xpt_alloc_ccb(); 4828 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 4829 request_ccb->ccb_h.pinfo.priority); 4830 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 4831 xpt_action(work_ccb); 4832 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 4833 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 4834 xpt_free_ccb(work_ccb); 4835 xpt_done(request_ccb); 4836 return; 4837 } 4838 4839 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 4840 /* 4841 * Can't scan the bus on an adapter that 4842 * cannot perform the initiator role. 4843 */ 4844 request_ccb->ccb_h.status = CAM_REQ_CMP; 4845 xpt_free_ccb(work_ccb); 4846 xpt_done(request_ccb); 4847 return; 4848 } 4849 4850 /* Save some state for use while we probe for devices */ 4851 scan_info = (xpt_scan_bus_info *) 4852 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 4853 scan_info->request_ccb = request_ccb; 4854 scan_info->cpi = &work_ccb->cpi; 4855 4856 /* Cache on our stack so we can work asynchronously */ 4857 max_target = scan_info->cpi->max_target; 4858 initiator_id = scan_info->cpi->initiator_id; 4859 4860 /* 4861 * Don't count the initiator if the 4862 * initiator is addressable. 
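 *
 * Worked example: with max_target 15 and initiator_id 7, pending_count
 * starts at 16 and drops to 15 once our own id is excluded, matching
 * the number of XPT_SCAN_LUN ccbs issued by the loop below.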
4863 */ 4864 scan_info->pending_count = max_target + 1; 4865 if (initiator_id <= max_target) 4866 scan_info->pending_count--; 4867 4868 for (i = 0; i <= max_target; i++) { 4869 cam_status status; 4870 if (i == initiator_id) 4871 continue; 4872 4873 status = xpt_create_path(&path, xpt_periph, 4874 request_ccb->ccb_h.path_id, 4875 i, 0); 4876 if (status != CAM_REQ_CMP) { 4877 printf("xpt_scan_bus: xpt_create_path failed" 4878 " with status %#x, bus scan halted\n", 4879 status); 4880 break; 4881 } 4882 work_ccb = xpt_alloc_ccb(); 4883 xpt_setup_ccb(&work_ccb->ccb_h, path, 4884 request_ccb->ccb_h.pinfo.priority); 4885 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4886 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4887 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 4888 work_ccb->crcn.flags = request_ccb->crcn.flags; 4889 #if 0 4890 printf("xpt_scan_bus: probing %d:%d:%d\n", 4891 request_ccb->ccb_h.path_id, i, 0); 4892 #endif 4893 xpt_action(work_ccb); 4894 } 4895 break; 4896 } 4897 case XPT_SCAN_LUN: 4898 { 4899 xpt_scan_bus_info *scan_info; 4900 path_id_t path_id; 4901 target_id_t target_id; 4902 lun_id_t lun_id; 4903 4904 /* Reuse the same CCB to query if a device was really found */ 4905 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 4906 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 4907 request_ccb->ccb_h.pinfo.priority); 4908 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 4909 4910 path_id = request_ccb->ccb_h.path_id; 4911 target_id = request_ccb->ccb_h.target_id; 4912 lun_id = request_ccb->ccb_h.target_lun; 4913 xpt_action(request_ccb); 4914 4915 #if 0 4916 printf("xpt_scan_bus: got back probe from %d:%d:%d\n", 4917 path_id, target_id, lun_id); 4918 #endif 4919 4920 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 4921 struct cam_ed *device; 4922 struct cam_et *target; 4923 int s, phl; 4924 4925 /* 4926 * If we already probed lun 0 successfully, or 4927 * we have additional configured luns on this 4928 * target that might have "gone away", go onto 4929 * the next lun. 4930 */ 4931 target = request_ccb->ccb_h.path->target; 4932 /* 4933 * We may touch devices that we don't 4934 * hold references too, so ensure they 4935 * don't disappear out from under us. 4936 * The target above is referenced by the 4937 * path in the request ccb. 
4938 */ 4939 phl = 0; 4940 s = splcam(); 4941 device = TAILQ_FIRST(&target->ed_entries); 4942 if (device != NULL) { 4943 phl = device->quirk->quirks & CAM_QUIRK_HILUNS; 4944 if (device->lun_id == 0) 4945 device = TAILQ_NEXT(device, links); 4946 } 4947 splx(s); 4948 if ((lun_id != 0) || (device != NULL)) { 4949 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) 4950 lun_id++; 4951 } 4952 } else { 4953 struct cam_ed *device; 4954 4955 device = request_ccb->ccb_h.path->device; 4956 4957 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 4958 /* Try the next lun */ 4959 if (lun_id < (CAM_SCSI2_MAXLUN-1) || 4960 (device->quirk->quirks & CAM_QUIRK_HILUNS)) 4961 lun_id++; 4962 } 4963 } 4964 4965 xpt_free_path(request_ccb->ccb_h.path); 4966 4967 /* Check Bounds */ 4968 if ((lun_id == request_ccb->ccb_h.target_lun) 4969 || lun_id > scan_info->cpi->max_lun) { 4970 /* We're done */ 4971 4972 xpt_free_ccb(request_ccb); 4973 scan_info->pending_count--; 4974 if (scan_info->pending_count == 0) { 4975 xpt_free_ccb((union ccb *)scan_info->cpi); 4976 request_ccb = scan_info->request_ccb; 4977 free(scan_info, M_TEMP); 4978 request_ccb->ccb_h.status = CAM_REQ_CMP; 4979 xpt_done(request_ccb); 4980 } 4981 } else { 4982 /* Try the next device */ 4983 struct cam_path *path; 4984 cam_status status; 4985 4986 path = request_ccb->ccb_h.path; 4987 status = xpt_create_path(&path, xpt_periph, 4988 path_id, target_id, lun_id); 4989 if (status != CAM_REQ_CMP) { 4990 printf("xpt_scan_bus: xpt_create_path failed " 4991 "with status %#x, halting LUN scan\n", 4992 status); 4993 xpt_free_ccb(request_ccb); 4994 scan_info->pending_count--; 4995 if (scan_info->pending_count == 0) { 4996 xpt_free_ccb( 4997 (union ccb *)scan_info->cpi); 4998 request_ccb = scan_info->request_ccb; 4999 free(scan_info, M_TEMP); 5000 request_ccb->ccb_h.status = CAM_REQ_CMP; 5001 xpt_done(request_ccb); 5002 break; 5003 } 5004 } 5005 xpt_setup_ccb(&request_ccb->ccb_h, path, 5006 request_ccb->ccb_h.pinfo.priority); 5007 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5008 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 5009 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 5010 request_ccb->crcn.flags = 5011 scan_info->request_ccb->crcn.flags; 5012 #if 0 5013 xpt_print_path(path); 5014 printf("xpt_scan bus probing\n"); 5015 #endif 5016 xpt_action(request_ccb); 5017 } 5018 break; 5019 } 5020 default: 5021 break; 5022 } 5023 } 5024 5025 typedef enum { 5026 PROBE_TUR, 5027 PROBE_INQUIRY, 5028 PROBE_FULL_INQUIRY, 5029 PROBE_MODE_SENSE, 5030 PROBE_SERIAL_NUM, 5031 PROBE_TUR_FOR_NEGOTIATION 5032 } probe_action; 5033 5034 typedef enum { 5035 PROBE_INQUIRY_CKSUM = 0x01, 5036 PROBE_SERIAL_CKSUM = 0x02, 5037 PROBE_NO_ANNOUNCE = 0x04 5038 } probe_flags; 5039 5040 typedef struct { 5041 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5042 probe_action action; 5043 union ccb saved_ccb; 5044 probe_flags flags; 5045 MD5_CTX context; 5046 u_int8_t digest[16]; 5047 } probe_softc; 5048 5049 static void 5050 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5051 cam_flags flags, union ccb *request_ccb) 5052 { 5053 struct ccb_pathinq cpi; 5054 cam_status status; 5055 struct cam_path *new_path; 5056 struct cam_periph *old_periph; 5057 int s; 5058 5059 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5060 ("xpt_scan_lun\n")); 5061 5062 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5063 cpi.ccb_h.func_code = XPT_PATH_INQ; 5064 xpt_action((union ccb *)&cpi); 5065 5066 if (cpi.ccb_h.status != CAM_REQ_CMP) { 5067 if (request_ccb != NULL) { 5068 request_ccb->ccb_h.status = cpi.ccb_h.status; 5069 
xpt_done(request_ccb); 5070 } 5071 return; 5072 } 5073 5074 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5075 /* 5076 * Can't scan the bus on an adapter that 5077 * cannot perform the initiator role. 5078 */ 5079 if (request_ccb != NULL) { 5080 request_ccb->ccb_h.status = CAM_REQ_CMP; 5081 xpt_done(request_ccb); 5082 } 5083 return; 5084 } 5085 5086 if (request_ccb == NULL) { 5087 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5088 if (request_ccb == NULL) { 5089 xpt_print_path(path); 5090 printf("xpt_scan_lun: can't allocate CCB, can't " 5091 "continue\n"); 5092 return; 5093 } 5094 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5095 if (new_path == NULL) { 5096 xpt_print_path(path); 5097 printf("xpt_scan_lun: can't allocate path, can't " 5098 "continue\n"); 5099 free(request_ccb, M_TEMP); 5100 return; 5101 } 5102 status = xpt_compile_path(new_path, xpt_periph, 5103 path->bus->path_id, 5104 path->target->target_id, 5105 path->device->lun_id); 5106 5107 if (status != CAM_REQ_CMP) { 5108 xpt_print_path(path); 5109 printf("xpt_scan_lun: can't compile path, can't " 5110 "continue\n"); 5111 free(request_ccb, M_TEMP); 5112 free(new_path, M_TEMP); 5113 return; 5114 } 5115 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5116 request_ccb->ccb_h.cbfcnp = xptscandone; 5117 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5118 request_ccb->crcn.flags = flags; 5119 } 5120 5121 s = splsoftcam(); 5122 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5123 probe_softc *softc; 5124 5125 softc = (probe_softc *)old_periph->softc; 5126 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5127 periph_links.tqe); 5128 } else { 5129 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5130 probestart, "probe", 5131 CAM_PERIPH_BIO, 5132 request_ccb->ccb_h.path, NULL, 0, 5133 request_ccb); 5134 5135 if (status != CAM_REQ_CMP) { 5136 xpt_print_path(path); 5137 printf("xpt_scan_lun: cam_periph_alloc returned an " 5138 "error, can't continue probe\n"); 5139 request_ccb->ccb_h.status = status; 5140 xpt_done(request_ccb); 5141 } 5142 } 5143 splx(s); 5144 } 5145 5146 static void 5147 xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5148 { 5149 xpt_release_path(done_ccb->ccb_h.path); 5150 free(done_ccb->ccb_h.path, M_TEMP); 5151 free(done_ccb, M_TEMP); 5152 } 5153 5154 static cam_status 5155 proberegister(struct cam_periph *periph, void *arg) 5156 { 5157 union ccb *request_ccb; /* CCB representing the probe request */ 5158 probe_softc *softc; 5159 5160 request_ccb = (union ccb *)arg; 5161 if (periph == NULL) { 5162 printf("proberegister: periph was NULL!!\n"); 5163 return(CAM_REQ_CMP_ERR); 5164 } 5165 5166 if (request_ccb == NULL) { 5167 printf("proberegister: no probe CCB, " 5168 "can't register device\n"); 5169 return(CAM_REQ_CMP_ERR); 5170 } 5171 5172 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5173 5174 if (softc == NULL) { 5175 printf("proberegister: Unable to probe new device. " 5176 "Unable to allocate softc\n"); 5177 return(CAM_REQ_CMP_ERR); 5178 } 5179 TAILQ_INIT(&softc->request_ccbs); 5180 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5181 periph_links.tqe); 5182 softc->flags = 0; 5183 periph->softc = softc; 5184 cam_periph_acquire(periph); 5185 /* 5186 * Ensure we've waited at least a bus settle 5187 * delay before attempting to probe the device. 5188 * For HBAs that don't do bus resets, this won't make a difference. 
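 *
 * SCSI_DELAY is expressed in milliseconds and may be overridden in the
 * kernel configuration file; for example (value illustrative):
 *
 *	options 	SCSI_DELAY=15000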
5189 */ 5190 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, 5191 SCSI_DELAY); 5192 probeschedule(periph); 5193 return(CAM_REQ_CMP); 5194 } 5195 5196 static void 5197 probeschedule(struct cam_periph *periph) 5198 { 5199 struct ccb_pathinq cpi; 5200 union ccb *ccb; 5201 probe_softc *softc; 5202 5203 softc = (probe_softc *)periph->softc; 5204 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5205 5206 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1); 5207 cpi.ccb_h.func_code = XPT_PATH_INQ; 5208 xpt_action((union ccb *)&cpi); 5209 5210 /* 5211 * If a device has gone away and another device, or the same one, 5212 * is back in the same place, it should have a unit attention 5213 * condition pending. It will not report the unit attention in 5214 * response to an inquiry, which may leave invalid transfer 5215 * negotiations in effect. The TUR will reveal the unit attention 5216 * condition. Only send the TUR for lun 0, since some devices 5217 * will get confused by commands other than inquiry to non-existent 5218 * luns. If you think a device has gone away, start your scan from 5219 * lun 0. This will ensure that any bogus transfer settings are 5220 * invalidated. 5221 * 5222 * If we haven't seen the device before and the controller supports 5223 * some kind of transfer negotiation, negotiate with the first 5224 * sent command if no bus reset was performed at startup. This 5225 * ensures that the device is not confused by transfer negotiation 5226 * settings left over by loader or BIOS action. 5227 */ 5228 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5229 && (ccb->ccb_h.target_lun == 0)) { 5230 softc->action = PROBE_TUR; 5231 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 5232 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { 5233 proberequestdefaultnegotiation(periph); 5234 softc->action = PROBE_INQUIRY; 5235 } else { 5236 softc->action = PROBE_INQUIRY; 5237 } 5238 5239 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) 5240 softc->flags |= PROBE_NO_ANNOUNCE; 5241 else 5242 softc->flags &= ~PROBE_NO_ANNOUNCE; 5243 5244 xpt_schedule(periph, ccb->ccb_h.pinfo.priority); 5245 } 5246 5247 static void 5248 probestart(struct cam_periph *periph, union ccb *start_ccb) 5249 { 5250 /* Probe the device that our peripheral driver points to */ 5251 struct ccb_scsiio *csio; 5252 probe_softc *softc; 5253 5254 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); 5255 5256 softc = (probe_softc *)periph->softc; 5257 csio = &start_ccb->csio; 5258 5259 switch (softc->action) { 5260 case PROBE_TUR: 5261 case PROBE_TUR_FOR_NEGOTIATION: 5262 { 5263 scsi_test_unit_ready(csio, 5264 /*retries*/4, 5265 probedone, 5266 MSG_SIMPLE_Q_TAG, 5267 SSD_FULL_SIZE, 5268 /*timeout*/60000); 5269 break; 5270 } 5271 case PROBE_INQUIRY: 5272 case PROBE_FULL_INQUIRY: 5273 { 5274 u_int inquiry_len; 5275 struct scsi_inquiry_data *inq_buf; 5276 5277 inq_buf = &periph->path->device->inq_data; 5278 /* 5279 * If the device is currently configured, we calculate an 5280 * MD5 checksum of the inquiry data, and if the serial number 5281 * length is greater than 0, add the serial number data 5282 * into the checksum as well. Once the inquiry and the 5283 * serial number check finish, we attempt to figure out 5284 * whether we still have the same device. 
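 *
 * In other words, the fingerprint is MD5(inquiry data || serial number);
 * matching digests across probes are taken to mean that the same device
 * is still present.  MD5 serves here as a cheap fingerprint, not for any
 * cryptographic property.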
5285 */ 5286 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 5287 5288 MD5Init(&softc->context); 5289 MD5Update(&softc->context, (unsigned char *)inq_buf, 5290 sizeof(struct scsi_inquiry_data)); 5291 softc->flags |= PROBE_INQUIRY_CKSUM; 5292 if (periph->path->device->serial_num_len > 0) { 5293 MD5Update(&softc->context, 5294 periph->path->device->serial_num, 5295 periph->path->device->serial_num_len); 5296 softc->flags |= PROBE_SERIAL_CKSUM; 5297 } 5298 MD5Final(softc->digest, &softc->context); 5299 } 5300 5301 if (softc->action == PROBE_INQUIRY) 5302 inquiry_len = SHORT_INQUIRY_LENGTH; 5303 else 5304 inquiry_len = inq_buf->additional_length + 4; 5305 5306 scsi_inquiry(csio, 5307 /*retries*/4, 5308 probedone, 5309 MSG_SIMPLE_Q_TAG, 5310 (u_int8_t *)inq_buf, 5311 inquiry_len, 5312 /*evpd*/FALSE, 5313 /*page_code*/0, 5314 SSD_MIN_SIZE, 5315 /*timeout*/60 * 1000); 5316 break; 5317 } 5318 case PROBE_MODE_SENSE: 5319 { 5320 void *mode_buf; 5321 int mode_buf_len; 5322 5323 mode_buf_len = sizeof(struct scsi_mode_header_6) 5324 + sizeof(struct scsi_mode_blk_desc) 5325 + sizeof(struct scsi_control_page); 5326 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT); 5327 if (mode_buf != NULL) { 5328 scsi_mode_sense(csio, 5329 /*retries*/4, 5330 probedone, 5331 MSG_SIMPLE_Q_TAG, 5332 /*dbd*/FALSE, 5333 SMS_PAGE_CTRL_CURRENT, 5334 SMS_CONTROL_MODE_PAGE, 5335 mode_buf, 5336 mode_buf_len, 5337 SSD_FULL_SIZE, 5338 /*timeout*/60000); 5339 break; 5340 } 5341 xpt_print_path(periph->path); 5342 printf("Unable to mode sense control page - malloc failure\n"); 5343 softc->action = PROBE_SERIAL_NUM; 5344 /* FALLTHROUGH */ 5345 } 5346 case PROBE_SERIAL_NUM: 5347 { 5348 struct scsi_vpd_unit_serial_number *serial_buf; 5349 struct cam_ed* device; 5350 5351 serial_buf = NULL; 5352 device = periph->path->device; 5353 device->serial_num = NULL; 5354 device->serial_num_len = 0; 5355 5356 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) 5357 serial_buf = (struct scsi_vpd_unit_serial_number *) 5358 malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT); 5359 5360 if (serial_buf != NULL) { 5361 bzero(serial_buf, sizeof(*serial_buf)); 5362 scsi_inquiry(csio, 5363 /*retries*/4, 5364 probedone, 5365 MSG_SIMPLE_Q_TAG, 5366 (u_int8_t *)serial_buf, 5367 sizeof(*serial_buf), 5368 /*evpd*/TRUE, 5369 SVPD_UNIT_SERIAL_NUMBER, 5370 SSD_MIN_SIZE, 5371 /*timeout*/60 * 1000); 5372 break; 5373 } 5374 /* 5375 * We'll have to do without, let our probedone 5376 * routine finish up for us. 
5377 */ 5378 start_ccb->csio.data_ptr = NULL; 5379 probedone(periph, start_ccb); 5380 return; 5381 } 5382 } 5383 xpt_action(start_ccb); 5384 } 5385 5386 static void 5387 proberequestdefaultnegotiation(struct cam_periph *periph) 5388 { 5389 struct ccb_trans_settings cts; 5390 5391 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 5392 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5393 cts.flags = CCB_TRANS_USER_SETTINGS; 5394 xpt_action((union ccb *)&cts); 5395 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 5396 cts.flags &= ~CCB_TRANS_USER_SETTINGS; 5397 cts.flags |= CCB_TRANS_CURRENT_SETTINGS; 5398 xpt_action((union ccb *)&cts); 5399 } 5400 5401 static void 5402 probedone(struct cam_periph *periph, union ccb *done_ccb) 5403 { 5404 probe_softc *softc; 5405 struct cam_path *path; 5406 u_int32_t priority; 5407 5408 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 5409 5410 softc = (probe_softc *)periph->softc; 5411 path = done_ccb->ccb_h.path; 5412 priority = done_ccb->ccb_h.pinfo.priority; 5413 5414 switch (softc->action) { 5415 case PROBE_TUR: 5416 { 5417 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5418 5419 if (cam_periph_error(done_ccb, 0, 5420 SF_NO_PRINT, NULL) == ERESTART) 5421 return; 5422 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5423 /* Don't wedge the queue */ 5424 xpt_release_devq(done_ccb->ccb_h.path, 5425 /*count*/1, 5426 /*run_queue*/TRUE); 5427 } 5428 softc->action = PROBE_INQUIRY; 5429 xpt_release_ccb(done_ccb); 5430 xpt_schedule(periph, priority); 5431 return; 5432 } 5433 case PROBE_INQUIRY: 5434 case PROBE_FULL_INQUIRY: 5435 { 5436 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5437 struct scsi_inquiry_data *inq_buf; 5438 u_int8_t periph_qual; 5439 u_int8_t periph_dtype; 5440 5441 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 5442 inq_buf = &path->device->inq_data; 5443 5444 periph_qual = SID_QUAL(inq_buf); 5445 periph_dtype = SID_TYPE(inq_buf); 5446 5447 if (periph_dtype != T_NODEVICE) { 5448 switch(periph_qual) { 5449 case SID_QUAL_LU_CONNECTED: 5450 { 5451 u_int8_t alen; 5452 5453 /* 5454 * We conservatively request only 5455 * SHORT_INQUIRY_LENGTH bytes of inquiry 5456 * information during our first try 5457 * at sending an INQUIRY. If the device 5458 * has more information to give, 5459 * perform a second request specifying 5460 * the amount of information the device 5461 * is willing to give. 5462 */ 5463 alen = inq_buf->additional_length; 5464 if (softc->action == PROBE_INQUIRY 5465 && alen > (SHORT_INQUIRY_LENGTH - 4)) { 5466 softc->action = 5467 PROBE_FULL_INQUIRY; 5468 xpt_release_ccb(done_ccb); 5469 xpt_schedule(periph, priority); 5470 return; 5471 } 5472 5473 xpt_find_quirk(path->device); 5474 5475 if ((inq_buf->flags & SID_CmdQue) != 0) 5476 softc->action = 5477 PROBE_MODE_SENSE; 5478 else 5479 softc->action = 5480 PROBE_SERIAL_NUM; 5481 5482 path->device->flags &= 5483 ~CAM_DEV_UNCONFIGURED; 5484 5485 xpt_release_ccb(done_ccb); 5486 xpt_schedule(periph, priority); 5487 return; 5488 } 5489 default: 5490 break; 5491 } 5492 } 5493 } else if (cam_periph_error(done_ccb, 0, 5494 done_ccb->ccb_h.target_lun > 0 5495 ? 
SF_RETRY_UA|SF_QUIET_IR 5496 : SF_RETRY_UA, 5497 &softc->saved_ccb) == ERESTART) { 5498 return; 5499 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5500 /* Don't wedge the queue */ 5501 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5502 /*run_queue*/TRUE); 5503 } 5504 /* 5505 * If we get to this point, we got an error status back 5506 * from the inquiry and the error status doesn't require 5507 * automatically retrying the command. Therefore, the 5508 * inquiry failed. If we had inquiry information before 5509 * for this device, but this latest inquiry command failed, 5510 * the device has probably gone away. If this device isn't 5511 * already marked unconfigured, notify the peripheral 5512 * drivers that this device is no more. 5513 */ 5514 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5515 /* Send the async notification. */ 5516 xpt_async(AC_LOST_DEVICE, path, NULL); 5517 5518 xpt_release_ccb(done_ccb); 5519 break; 5520 } 5521 case PROBE_MODE_SENSE: 5522 { 5523 struct ccb_scsiio *csio; 5524 struct scsi_mode_header_6 *mode_hdr; 5525 5526 csio = &done_ccb->csio; 5527 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 5528 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5529 struct scsi_control_page *page; 5530 u_int8_t *offset; 5531 5532 offset = ((u_int8_t *)&mode_hdr[1]) 5533 + mode_hdr->blk_desc_len; 5534 page = (struct scsi_control_page *)offset; 5535 path->device->queue_flags = page->queue_flags; 5536 } else if (cam_periph_error(done_ccb, 0, 5537 SF_RETRY_UA|SF_NO_PRINT, 5538 &softc->saved_ccb) == ERESTART) { 5539 return; 5540 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5541 /* Don't wedge the queue */ 5542 xpt_release_devq(done_ccb->ccb_h.path, 5543 /*count*/1, /*run_queue*/TRUE); 5544 } 5545 xpt_release_ccb(done_ccb); 5546 free(mode_hdr, M_TEMP); 5547 softc->action = PROBE_SERIAL_NUM; 5548 xpt_schedule(periph, priority); 5549 return; 5550 } 5551 case PROBE_SERIAL_NUM: 5552 { 5553 struct ccb_scsiio *csio; 5554 struct scsi_vpd_unit_serial_number *serial_buf; 5555 u_int32_t priority; 5556 int changed; 5557 int have_serialnum; 5558 5559 changed = 1; 5560 have_serialnum = 0; 5561 csio = &done_ccb->csio; 5562 priority = done_ccb->ccb_h.pinfo.priority; 5563 serial_buf = 5564 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 5565 5566 /* Clean up from previous instance of this device */ 5567 if (path->device->serial_num != NULL) { 5568 free(path->device->serial_num, M_DEVBUF); 5569 path->device->serial_num = NULL; 5570 path->device->serial_num_len = 0; 5571 } 5572 5573 if (serial_buf == NULL) { 5574 /* 5575 * Don't process the command as it was never sent 5576 */ 5577 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP 5578 && (serial_buf->length > 0)) { 5579 5580 have_serialnum = 1; 5581 path->device->serial_num = 5582 (u_int8_t *)malloc((serial_buf->length + 1), 5583 M_DEVBUF, M_NOWAIT); 5584 if (path->device->serial_num != NULL) { 5585 bcopy(serial_buf->serial_num, 5586 path->device->serial_num, 5587 serial_buf->length); 5588 path->device->serial_num_len = 5589 serial_buf->length; 5590 path->device->serial_num[serial_buf->length] 5591 = '\0'; 5592 } 5593 } else if (cam_periph_error(done_ccb, 0, 5594 SF_RETRY_UA|SF_NO_PRINT, 5595 &softc->saved_ccb) == ERESTART) { 5596 return; 5597 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5598 /* Don't wedge the queue */ 5599 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5600 /*run_queue*/TRUE); 5601 } 5602 5603 /* 5604 * Let's see if we have seen this 
device before. 5605 */ 5606 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { 5607 MD5_CTX context; 5608 u_int8_t digest[16]; 5609 5610 MD5Init(&context); 5611 5612 MD5Update(&context, 5613 (unsigned char *)&path->device->inq_data, 5614 sizeof(struct scsi_inquiry_data)); 5615 5616 if (have_serialnum) 5617 MD5Update(&context, serial_buf->serial_num, 5618 serial_buf->length); 5619 5620 MD5Final(digest, &context); 5621 if (bcmp(softc->digest, digest, 16) == 0) 5622 changed = 0; 5623 5624 /* 5625 * XXX Do we need to do a TUR in order to ensure 5626 * that the device really hasn't changed??? 5627 */ 5628 if ((changed != 0) 5629 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) 5630 xpt_async(AC_LOST_DEVICE, path, NULL); 5631 } 5632 if (serial_buf != NULL) 5633 free(serial_buf, M_TEMP); 5634 5635 if (changed != 0) { 5636 /* 5637 * Now that we have all the necessary 5638 * information to safely perform transfer 5639 * negotiations... Controllers don't perform 5640 * any negotiation or tagged queuing until 5641 * after the first XPT_SET_TRAN_SETTINGS ccb is 5642 * received. So, on a new device, just retrieve 5643 * the user settings, and set them as the current 5644 * settings to set the device up. 5645 */ 5646 proberequestdefaultnegotiation(periph); 5647 xpt_release_ccb(done_ccb); 5648 5649 /* 5650 * Perform a TUR to allow the controller to 5651 * perform any necessary transfer negotiation. 5652 */ 5653 softc->action = PROBE_TUR_FOR_NEGOTIATION; 5654 xpt_schedule(periph, priority); 5655 return; 5656 } 5657 xpt_release_ccb(done_ccb); 5658 break; 5659 } 5660 case PROBE_TUR_FOR_NEGOTIATION: 5661 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5662 /* Don't wedge the queue */ 5663 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5664 /*run_queue*/TRUE); 5665 } 5666 5667 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 5668 5669 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 5670 /* Inform the XPT that a new device has been found */ 5671 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5672 xpt_action(done_ccb); 5673 5674 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb); 5675 } 5676 xpt_release_ccb(done_ccb); 5677 break; 5678 } 5679 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5680 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); 5681 done_ccb->ccb_h.status = CAM_REQ_CMP; 5682 xpt_done(done_ccb); 5683 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { 5684 cam_periph_invalidate(periph); 5685 cam_periph_release(periph); 5686 } else { 5687 probeschedule(periph); 5688 } 5689 } 5690 5691 static void 5692 probecleanup(struct cam_periph *periph) 5693 { 5694 free(periph->softc, M_TEMP); 5695 } 5696 5697 static void 5698 xpt_find_quirk(struct cam_ed *device) 5699 { 5700 caddr_t match; 5701 5702 match = cam_quirkmatch((caddr_t)&device->inq_data, 5703 (caddr_t)xpt_quirk_table, 5704 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table), 5705 sizeof(*xpt_quirk_table), scsi_inquiry_match); 5706 5707 if (match == NULL) 5708 panic("xpt_find_quirk: device didn't match wildcard entry!!"); 5709 5710 device->quirk = (struct xpt_quirk_entry *)match; 5711 } 5712 5713 static void 5714 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, 5715 int async_update) 5716 { 5717 struct cam_sim *sim; 5718 int qfrozen; 5719 5720 sim = cts->ccb_h.path->bus->sim; 5721 if (async_update == FALSE) { 5722 struct scsi_inquiry_data *inq_data; 5723 struct ccb_pathinq cpi; 5724 struct ccb_trans_settings cur_cts; 5725 5726 if (device == NULL) { 5727 cts->ccb_h.status = CAM_PATH_INVALID; 
5728 xpt_done((union ccb *)cts); 5729 return; 5730 } 5731 5732 /* 5733 * Perform sanity checking against what the 5734 * controller and device can do. 5735 */ 5736 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1); 5737 cpi.ccb_h.func_code = XPT_PATH_INQ; 5738 xpt_action((union ccb *)&cpi); 5739 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1); 5740 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5741 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS; 5742 xpt_action((union ccb *)&cur_cts); 5743 inq_data = &device->inq_data; 5744 5745 /* Fill in any gaps in what the user gave us */ 5746 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) 5747 cts->sync_period = cur_cts.sync_period; 5748 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) 5749 cts->sync_offset = cur_cts.sync_offset; 5750 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0) 5751 cts->bus_width = cur_cts.bus_width; 5752 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) { 5753 cts->flags &= ~CCB_TRANS_DISC_ENB; 5754 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB; 5755 } 5756 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) { 5757 cts->flags &= ~CCB_TRANS_TAG_ENB; 5758 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB; 5759 } 5760 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 5761 && (inq_data->flags & SID_Sync) == 0) 5762 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { 5763 /* Force async */ 5764 cts->sync_period = 0; 5765 cts->sync_offset = 0; 5766 } 5767 5768 switch (cts->bus_width) { 5769 case MSG_EXT_WDTR_BUS_32_BIT: 5770 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5771 || (inq_data->flags & SID_WBus32) != 0) 5772 && (cpi.hba_inquiry & PI_WIDE_32) != 0) 5773 break; 5774 /* Fall Through to 16-bit */ 5775 case MSG_EXT_WDTR_BUS_16_BIT: 5776 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5777 || (inq_data->flags & SID_WBus16) != 0) 5778 && (cpi.hba_inquiry & PI_WIDE_16) != 0) { 5779 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 5780 break; 5781 } 5782 /* Fall Through to 8-bit */ 5783 default: /* New bus width?? */ 5784 case MSG_EXT_WDTR_BUS_8_BIT: 5785 /* All targets can do this */ 5786 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 5787 break; 5788 } 5789 5790 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) { 5791 /* 5792 * Can't tag queue without disconnection. 5793 */ 5794 cts->flags &= ~CCB_TRANS_TAG_ENB; 5795 cts->valid |= CCB_TRANS_TQ_VALID; 5796 } 5797 5798 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 5799 || (inq_data->flags & SID_CmdQue) == 0 5800 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 5801 || (device->quirk->mintags == 0)) { 5802 /* 5803 * Can't tag on hardware that doesn't support it, 5804 * doesn't have it enabled, or has broken tag support. 5805 */ 5806 cts->flags &= ~CCB_TRANS_TAG_ENB; 5807 } 5808 } 5809 5810 qfrozen = FALSE; 5811 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0 5812 && (async_update == FALSE)) { 5813 int device_tagenb; 5814 5815 /* 5816 * If we are transitioning from tags to no-tags or 5817 * vice-versa, we need to carefully freeze and restart 5818 * the queue so that we don't overlap tagged and non-tagged 5819 * commands. We also temporarily stop tags if there is 5820 * a change in transfer negotiation settings to allow 5821 * "tag-less" negotiation. 
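 *
 * For reference, the settings change that lands here typically arrives
 * as an XPT_SET_TRAN_SETTINGS ccb; a minimal sketch of such a caller
 * (field values illustrative only):
 *
 *	struct ccb_trans_settings cts;
 *
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);
 *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *	cts.valid = CCB_TRANS_TQ_VALID;
 *	cts.flags = CCB_TRANS_TAG_ENB;
 *	xpt_action((union ccb *)&cts);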
5822 */ 5823 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5824 || (device->inq_flags & SID_CmdQue) != 0) 5825 device_tagenb = TRUE; 5826 else 5827 device_tagenb = FALSE; 5828 5829 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0 5830 && device_tagenb == FALSE) 5831 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0 5832 && device_tagenb == TRUE)) { 5833 5834 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 5835 /* 5836 * Delay change to use tags until after a 5837 * few commands have gone to this device so 5838 * the controller has time to perform transfer 5839 * negotiations without tagged messages getting 5840 * in the way. 5841 */ 5842 device->tag_delay_count = CAM_TAG_DELAY_COUNT; 5843 device->flags |= CAM_DEV_TAG_AFTER_COUNT; 5844 } else { 5845 xpt_freeze_devq(cts->ccb_h.path, /*count*/1); 5846 qfrozen = TRUE; 5847 device->inq_flags &= ~SID_CmdQue; 5848 xpt_dev_ccbq_resize(cts->ccb_h.path, 5849 sim->max_dev_openings); 5850 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5851 device->tag_delay_count = 0; 5852 } 5853 } 5854 } 5855 5856 if (async_update == FALSE) { 5857 /* 5858 * If we are currently performing tagged transactions to 5859 * this device and want to change its negotiation parameters, 5860 * go non-tagged for a bit to give the controller a chance to 5861 * negotiate unhampered by tag messages. 5862 */ 5863 if ((device->inq_flags & SID_CmdQue) != 0 5864 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID| 5865 CCB_TRANS_SYNC_OFFSET_VALID| 5866 CCB_TRANS_BUS_WIDTH_VALID)) != 0) 5867 xpt_toggle_tags(cts->ccb_h.path); 5868 5869 (*(sim->sim_action))(sim, (union ccb *)cts); 5870 } 5871 5872 if (qfrozen) { 5873 struct ccb_relsim crs; 5874 5875 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path, 5876 /*priority*/1); 5877 crs.ccb_h.func_code = XPT_REL_SIMQ; 5878 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5879 crs.openings 5880 = crs.release_timeout 5881 = crs.qfrozen_cnt 5882 = 0; 5883 xpt_action((union ccb *)&crs); 5884 } 5885 } 5886 5887 static void 5888 xpt_toggle_tags(struct cam_path *path) 5889 { 5890 struct cam_ed *dev; 5891 5892 /* 5893 * Give controllers a chance to renegotiate 5894 * before starting tag operations. We 5895 * "toggle" tagged queuing off then on 5896 * which causes the tag enable command delay 5897 * counter to come into effect. 
5898 */ 5899 dev = path->device; 5900 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5901 || ((dev->inq_flags & SID_CmdQue) != 0 5902 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { 5903 struct ccb_trans_settings cts; 5904 5905 xpt_setup_ccb(&cts.ccb_h, path, 1); 5906 cts.flags = 0; 5907 cts.valid = CCB_TRANS_TQ_VALID; 5908 xpt_set_transfer_settings(&cts, path->device, 5909 /*async_update*/TRUE); 5910 cts.flags = CCB_TRANS_TAG_ENB; 5911 xpt_set_transfer_settings(&cts, path->device, 5912 /*async_update*/TRUE); 5913 } 5914 } 5915 5916 static void 5917 xpt_start_tags(struct cam_path *path) 5918 { 5919 struct ccb_relsim crs; 5920 struct cam_ed *device; 5921 struct cam_sim *sim; 5922 int newopenings; 5923 5924 device = path->device; 5925 sim = path->bus->sim; 5926 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5927 xpt_freeze_devq(path, /*count*/1); 5928 device->inq_flags |= SID_CmdQue; 5929 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings); 5930 xpt_dev_ccbq_resize(path, newopenings); 5931 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1); 5932 crs.ccb_h.func_code = XPT_REL_SIMQ; 5933 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5934 crs.openings 5935 = crs.release_timeout 5936 = crs.qfrozen_cnt 5937 = 0; 5938 xpt_action((union ccb *)&crs); 5939 } 5940 5941 static int busses_to_config; 5942 static int busses_to_reset; 5943 5944 static int 5945 xptconfigbuscountfunc(struct cam_eb *bus, void *arg) 5946 { 5947 if (bus->path_id != CAM_XPT_PATH_ID) { 5948 struct cam_path path; 5949 struct ccb_pathinq cpi; 5950 int can_negotiate; 5951 5952 busses_to_config++; 5953 xpt_compile_path(&path, NULL, bus->path_id, 5954 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5955 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 5956 cpi.ccb_h.func_code = XPT_PATH_INQ; 5957 xpt_action((union ccb *)&cpi); 5958 can_negotiate = cpi.hba_inquiry; 5959 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 5960 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0 5961 && can_negotiate) 5962 busses_to_reset++; 5963 xpt_release_path(&path); 5964 } 5965 5966 return(1); 5967 } 5968 5969 static int 5970 xptconfigfunc(struct cam_eb *bus, void *arg) 5971 { 5972 struct cam_path *path; 5973 union ccb *work_ccb; 5974 5975 if (bus->path_id != CAM_XPT_PATH_ID) { 5976 cam_status status; 5977 int can_negotiate; 5978 5979 work_ccb = xpt_alloc_ccb(); 5980 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id, 5981 CAM_TARGET_WILDCARD, 5982 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){ 5983 printf("xptconfigfunc: xpt_create_path failed with " 5984 "status %#x for bus %d\n", status, bus->path_id); 5985 printf("xptconfigfunc: halting bus configuration\n"); 5986 xpt_free_ccb(work_ccb); 5987 busses_to_config--; 5988 xpt_finishconfig(xpt_periph, NULL); 5989 return(0); 5990 } 5991 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 5992 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5993 xpt_action(work_ccb); 5994 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5995 printf("xptconfigfunc: CPI failed on bus %d " 5996 "with status %d\n", bus->path_id, 5997 work_ccb->ccb_h.status); 5998 xpt_finishconfig(xpt_periph, work_ccb); 5999 return(1); 6000 } 6001 6002 can_negotiate = work_ccb->cpi.hba_inquiry; 6003 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 6004 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0 6005 && (can_negotiate != 0)) { 6006 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 6007 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 6008 work_ccb->ccb_h.cbfcnp = NULL; 6009 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE, 6010 
("Resetting Bus\n")); 6011 xpt_action(work_ccb); 6012 xpt_finishconfig(xpt_periph, work_ccb); 6013 } else { 6014 /* Act as though we performed a successful BUS RESET */ 6015 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 6016 xpt_finishconfig(xpt_periph, work_ccb); 6017 } 6018 } 6019 6020 return(1); 6021 } 6022 6023 static void 6024 xpt_config(void *arg) 6025 { 6026 /* Now that interrupts are enabled, go find our devices */ 6027 6028 #ifdef CAMDEBUG 6029 /* Setup debugging flags and path */ 6030 #ifdef CAM_DEBUG_FLAGS 6031 cam_dflags = CAM_DEBUG_FLAGS; 6032 #else /* !CAM_DEBUG_FLAGS */ 6033 cam_dflags = CAM_DEBUG_NONE; 6034 #endif /* CAM_DEBUG_FLAGS */ 6035 #ifdef CAM_DEBUG_BUS 6036 if (cam_dflags != CAM_DEBUG_NONE) { 6037 if (xpt_create_path(&cam_dpath, xpt_periph, 6038 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 6039 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 6040 printf("xpt_config: xpt_create_path() failed for debug" 6041 " target %d:%d:%d, debugging disabled\n", 6042 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 6043 cam_dflags = CAM_DEBUG_NONE; 6044 } 6045 } else 6046 cam_dpath = NULL; 6047 #else /* !CAM_DEBUG_BUS */ 6048 cam_dpath = NULL; 6049 #endif /* CAM_DEBUG_BUS */ 6050 #endif /* CAMDEBUG */ 6051 6052 /* 6053 * Scan all installed busses. 6054 */ 6055 xpt_for_all_busses(xptconfigbuscountfunc, NULL); 6056 6057 if (busses_to_config == 0) { 6058 /* Call manually because we don't have any busses */ 6059 xpt_finishconfig(xpt_periph, NULL); 6060 } else { 6061 if (busses_to_reset > 0 && SCSI_DELAY >= 2000) { 6062 printf("Waiting %d seconds for SCSI " 6063 "devices to settle\n", SCSI_DELAY/1000); 6064 } 6065 xpt_for_all_busses(xptconfigfunc, NULL); 6066 } 6067 } 6068 6069 /* 6070 * If the given device only has one peripheral attached to it, and if that 6071 * peripheral is the passthrough driver, announce it. This ensures that the 6072 * user sees some sort of announcement for every peripheral in their system. 6073 */ 6074 static int 6075 xptpassannouncefunc(struct cam_ed *device, void *arg) 6076 { 6077 struct cam_periph *periph; 6078 int i; 6079 6080 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 6081 periph = SLIST_NEXT(periph, periph_links), i++); 6082 6083 periph = SLIST_FIRST(&device->periphs); 6084 if ((i == 1) 6085 && (strncmp(periph->periph_name, "pass", 4) == 0)) 6086 xpt_announce_periph(periph, NULL); 6087 6088 return(1); 6089 } 6090 6091 static void 6092 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb) 6093 { 6094 struct periph_driver **p_drv; 6095 int i; 6096 6097 if (done_ccb != NULL) { 6098 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 6099 ("xpt_finishconfig\n")); 6100 switch(done_ccb->ccb_h.func_code) { 6101 case XPT_RESET_BUS: 6102 if (done_ccb->ccb_h.status == CAM_REQ_CMP) { 6103 done_ccb->ccb_h.func_code = XPT_SCAN_BUS; 6104 done_ccb->ccb_h.cbfcnp = xpt_finishconfig; 6105 xpt_action(done_ccb); 6106 return; 6107 } 6108 /* FALLTHROUGH */ 6109 case XPT_SCAN_BUS: 6110 default: 6111 xpt_free_path(done_ccb->ccb_h.path); 6112 busses_to_config--; 6113 break; 6114 } 6115 } 6116 6117 if (busses_to_config == 0) { 6118 /* Register all the peripheral drivers */ 6119 /* XXX This will have to change when we have loadable modules */ 6120 p_drv = (struct periph_driver **)periphdriver_set.ls_items; 6121 for (i = 0; p_drv[i] != NULL; i++) { 6122 (*p_drv[i]->init)(); 6123 } 6124 6125 /* 6126 * Check for devices with no "standard" peripheral driver 6127 * attached. For any devices like that, announce the 6128 * passthrough driver so the user will see something. 
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct periph_driver **p_drv;
	int i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch (done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		free(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * The xpt as a "controller" has no interrupt sources, so polling
 * is a no-op.
 */
static void
xptpoll(struct cam_sim *sim)
{
}

/*
 * These should only be called by the machine interrupt dispatch
 * routines, so their prototypes are kept here instead of in the header.
 */
static void
swi_camnet(void)
{
	camisr(&cam_netq);
}

static void
swi_cambio(void)
{
	camisr(&cam_bioq);
}
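/*
 * A sketch of the completion path these handlers service, assuming the
 * usual xpt_done() behavior (implemented elsewhere in this file) of
 * queueing the finished CCB's header on cam_bioq or cam_netq and
 * scheduling the matching software interrupt:
 *
 *	SIM interrupt handler --> xpt_done(done_ccb)
 *		--> CCB header appended to cam_bioq or cam_netq
 *		--> software interrupt requested
 *	software interrupt --> swi_cambio()/swi_camnet()
 *		--> camisr(queue)
 *		--> (*ccb_h->cbfcnp)(periph, done_ccb)
 */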
static void
camisr(cam_isrq_t *queue)
{
	int s;
	struct ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			struct cam_ed *device;
			union ccb *send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*run_queue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {
				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_RELEASE_SIMQ;
			runq = FALSE;
		}

		if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);

		/* Raise IPL again for the loop's while test */
		s = splcam();
	}
	splx(s);
}
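/*
 * For reference, a minimal peripheral completion callback of the kind
 * camisr() invokes above.  This is an illustrative sketch with a
 * hypothetical name, not code used by the xpt layer; real peripheral
 * drivers perform considerably more error recovery:
 *
 *	static void
 *	xptexampledone(struct cam_periph *periph, union ccb *done_ccb)
 *	{
 *		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *			printf("example: I/O failed, status %#x\n",
 *			       done_ccb->ccb_h.status);
 *		xpt_release_ccb(done_ccb);
 *	}
 */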