/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>
#include <sys/interrupt.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_conf.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"

/* Datastructures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER	4
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};
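
/*
 * Each cam_ed (below) embeds two of these entries, alloc_ccb_entry and
 * send_ccb_entry, so a single device can sit on both run queues at the
 * same time.
 */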

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et	 *target;
	lun_id_t	 lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	*serial_num;
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define	CAM_DEV_UNCONFIGURED		0x01
#define	CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define	CAM_DEV_REL_ON_COMPLETE		0x04
#define	CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define	CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define	CAM_DEV_TAG_AFTER_COUNT		0x20
#define	CAM_DEV_INQUIRY_DATA_VALID	0x40
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
	struct		timeval last_reset;
};
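
/*
 * Busses, targets, and devices are all reference counted; the per-object
 * generation counts let the device-match code further down notice when a
 * list has changed between calls.
 */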

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	path_id_t	     path_id;
	struct cam_sim	     *sim;
	struct timeval	     last_reset;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int32_t	     refcount;
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};

struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
#define	CAM_QUIRK_HILUNS	0x04
	u_int mintags;
	u_int maxtags;
};
#define	CAM_SCSI2_MAXLUN	8

typedef enum {
	XPT_FLAG_OPEN		= 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";
static const char samsung[] = "SAMSUNG";
static const char seagate[] = "SEAGATE";
static const char microp[] = "MICROP";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Unfortunately, the Quantum Atlas III has the same
		 * problem as the Atlas II drives above.
		 * Reported by: "Johan Granlund" <johan@granlund.nu>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM39100TD-SW N1B0
		 *
		 * It's possible that Quantum will fix the problem in later
		 * firmware revisions.  If that happens, the quirk entry
		 * will need to be made specific to the firmware revisions
		 * with the problem.
		 *
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * 18 Gig Atlas III, same problem as the 9G version.
		 * Reported by: Andre Albsmeier
		 *		<andre.albsmeier@mchp.siemens.de>
		 *
		 * For future reference, the drive with the problem was:
		 * QUANTUM QM318000TD-S N491
		 */
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by:  Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive:  <SEAGATE ST36530N 1444>
	 *
	 * Reported by:  Jeremy Lea <reg@shale.csir.co.za>
	 * Drive:  <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR:  kern/10398
		 * Submitted by:  Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive:  DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by:  Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
			"96X2*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Submitted by: Matthew Dodd <winter@jurai.net> */
		{ T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};

static const int xpt_quirk_table_size =
	sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
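
/*
 * Note on how this table is consumed: xpt_find_quirk() (declared below)
 * runs a device's inquiry data through cam_quirkmatch(), and the first
 * entry whose scsi_inquiry_pattern matches wins.  The wildcard entry
 * above must therefore remain last, as the catch-all default.
 */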

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
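
/*
 * A dev_match_ret packs two independent fields into one byte: the low
 * nibble holds flags (currently just DM_RET_COPY, "copy this node out")
 * and the high nibble holds a single action (none, stop, descend, or
 * error).  For example, (DM_RET_DESCEND | DM_RET_COPY) means "copy this
 * node and keep walking down the tree"; the match functions below mask
 * with DM_RET_ACTION_MASK to inspect the action alone.
 */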

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
static SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);
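
/*
 * DATA_SET() places these entries in the periphdriver_set linker set;
 * xptioctl() below walks that set (via periphdriver_set.ls_items) when
 * it needs to find a peripheral driver by name.
 */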

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
	/* open */	xptopen,
	/* close */	xptclose,
	/* read */	noread,
	/* write */	nowrite,
	/* ioctl */	xptioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"xpt",
	/* maj */	XPT_CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
static TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging datastructures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
u_int32_t cam_debug_delay;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
        and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */

/* Our boot-time initialization hook */
static void	xpt_init(void *);
SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void		xpt_release_path(struct cam_path *path);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static int	 xptnextfreebus(path_id_t startbus);
static int	 xptpathid(const char *sim_name, int sim_unit, int sim_bus,
			   path_id_t *nextpath);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static void	 xpt_run_dev_allocq(struct cam_eb *bus);
static void	 xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static void	 xpt_release_bus(struct cam_eb *bus);
static void	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
					 int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				  lun_id_t lun_id);
static void	 xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				    struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	 xpt_scan_lun(struct cam_periph *periph,
			      struct cam_path *path, cam_flags flags,
			      union ccb *ccb);
static void	 xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	 xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static swihand_t swi_camnet;
static swihand_t swi_cambio;
static void	 camisr(cam_isrq_t *queue);
#if 0
static void	 xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	 xptasync(struct cam_periph *periph,
			  u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns, struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int		xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
#ifdef notusedyet
static int		xpt_for_all_targets(xpt_targetfunc_t *tr_func,
					    void *arg);
#endif
static int		xpt_for_all_devices(xpt_devicefunc_t *tr_func,
					    void *arg);
#ifdef notusedyet
static int		xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
					    void *arg);
#endif
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	 probeschedule(struct cam_periph *probe_periph);
static void	 probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	 proberequestdefaultnegotiation(struct cam_periph *periph);
static void	 probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	 probecleanup(struct cam_periph *periph);
static void	 xpt_find_quirk(struct cam_ed *device);
static void	 xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					   struct cam_ed *device,
					   int async_update);
static void	 xpt_toggle_tags(struct cam_path *path);
static void	 xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
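
/*
 * The two scheduling helpers below put a device on the bus's alloc or
 * send run queue.  In both cases the device inherits the priority of
 * its highest priority waiter: the head of its driver queue for the
 * alloc queue, and the head of its ccb queue for the send queue.
 */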

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
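
/*
 * For reference, a userland consumer reaches the ioctl below roughly as
 * follows (an illustrative sketch only; in practice tools like camcontrol
 * go through the libcam wrappers):
 *
 *	int fd = open("/dev/xpt0", O_RDWR);	(read-write, blocking)
 *	union ccb ccb;
 *	... fill in ccb.ccb_h (func_code, path_id, target_id, target_lun) ...
 *	ioctl(fd, CAMIOCOMMAND, &ccb);
 */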

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it with splcam protection.
	 *
	 */
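	/*
	 * Illustrative use (e.g. what camcontrol does to find the pass
	 * device for "da1"): set cgdl.periph_name = "da" and
	 * cgdl.unit_number = 1 in an XPT_GDEVLIST ccb, issue
	 * ioctl(fd, CAMGETPASSTHRU, &ccb), and on success read the "pass"
	 * unit back from cgdl.unit_number.
	 */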
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
		}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
					   M_DEVBUF, M_WAITOK);
	xpt_sim->sim_action = xptaction;
	xpt_sim->sim_name = "xpt";
	xpt_sim->path_id = CAM_XPT_PATH_ID;
	xpt_sim->bus_id = 0;
	xpt_sim->max_tagged_dev_openings = 0;
	xpt_sim->max_dev_openings = 0;
	xpt_sim->devq = cam_simq_alloc(16);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, 0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMNET, swi_camnet);
	register_swi(SWI_CAMBIO, swi_cambio);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
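	/*
	 * Note on the arithmetic below: scsi_calc_syncsrate() returns the
	 * sync transfer frequency in kHz.  Since an 8-bit bus moves one
	 * byte per transfer, that figure is also the throughput in KB/s,
	 * and each doubling of bus width (1 << bus_width) doubles it.
	 */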
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;


			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun IDs, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
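
/*
 * The xptedt*func callbacks below implement a resumable walk of the EDT
 * for XPT_DEV_MATCH.  When the caller's match buffer fills up, the
 * current bus/target/device/periph cookies and the relevant generation
 * counts are saved in cdm->pos and CAM_DEV_MATCH_MORE is returned; on
 * the next call the walk resumes from those cookies, and a stale
 * generation count is reported as CAM_DEV_MATCH_LIST_CHANGED.
 */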

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
1953 */ 1954 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1955 && (cdm->pos.cookie.bus == target->bus) 1956 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1957 && (cdm->pos.cookie.target == target) 1958 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1959 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 1960 && (cdm->pos.generations[CAM_DEV_GENERATION] != 1961 target->generation)) { 1962 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1963 return(0); 1964 } 1965 1966 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1967 && (cdm->pos.cookie.bus == target->bus) 1968 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1969 && (cdm->pos.cookie.target == target) 1970 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1971 && (cdm->pos.cookie.device != NULL)) 1972 return(xptdevicetraverse(target, 1973 (struct cam_ed *)cdm->pos.cookie.device, 1974 xptedtdevicefunc, arg)); 1975 else 1976 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 1977 } 1978 1979 static int 1980 xptedtdevicefunc(struct cam_ed *device, void *arg) 1981 { 1982 1983 struct ccb_dev_match *cdm; 1984 dev_match_ret retval; 1985 1986 cdm = (struct ccb_dev_match *)arg; 1987 1988 /* 1989 * If our position is for something deeper in the tree, that means 1990 * that we've already seen this node. So, we keep going down. 1991 */ 1992 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1993 && (cdm->pos.cookie.device == device) 1994 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1995 && (cdm->pos.cookie.periph != NULL)) 1996 retval = DM_RET_DESCEND; 1997 else 1998 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1999 device); 2000 2001 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2002 cdm->status = CAM_DEV_MATCH_ERROR; 2003 return(0); 2004 } 2005 2006 /* 2007 * If the copy flag is set, copy this device out. 2008 */ 2009 if (retval & DM_RET_COPY) { 2010 int spaceleft, j; 2011 2012 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2013 sizeof(struct dev_match_result)); 2014 2015 /* 2016 * If we don't have enough space to put in another 2017 * match result, save our position and tell the 2018 * user there are more devices to check. 
2019 */ 2020 if (spaceleft < sizeof(struct dev_match_result)) { 2021 bzero(&cdm->pos, sizeof(cdm->pos)); 2022 cdm->pos.position_type = 2023 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2024 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2025 2026 cdm->pos.cookie.bus = device->target->bus; 2027 cdm->pos.generations[CAM_BUS_GENERATION]= 2028 bus_generation; 2029 cdm->pos.cookie.target = device->target; 2030 cdm->pos.generations[CAM_TARGET_GENERATION] = 2031 device->target->bus->generation; 2032 cdm->pos.cookie.device = device; 2033 cdm->pos.generations[CAM_DEV_GENERATION] = 2034 device->target->generation; 2035 cdm->status = CAM_DEV_MATCH_MORE; 2036 return(0); 2037 } 2038 j = cdm->num_matches; 2039 cdm->num_matches++; 2040 cdm->matches[j].type = DEV_MATCH_DEVICE; 2041 cdm->matches[j].result.device_result.path_id = 2042 device->target->bus->path_id; 2043 cdm->matches[j].result.device_result.target_id = 2044 device->target->target_id; 2045 cdm->matches[j].result.device_result.target_lun = 2046 device->lun_id; 2047 bcopy(&device->inq_data, 2048 &cdm->matches[j].result.device_result.inq_data, 2049 sizeof(struct scsi_inquiry_data)); 2050 2051 /* Let the user know whether this device is unconfigured */ 2052 if (device->flags & CAM_DEV_UNCONFIGURED) 2053 cdm->matches[j].result.device_result.flags = 2054 DEV_RESULT_UNCONFIGURED; 2055 else 2056 cdm->matches[j].result.device_result.flags = 2057 DEV_RESULT_NOFLAG; 2058 } 2059 2060 /* 2061 * If the user isn't interested in peripherals, don't descend 2062 * the tree any further. 2063 */ 2064 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2065 return(1); 2066 2067 /* 2068 * If there is a peripheral list generation recorded, make sure 2069 * it hasn't changed. 2070 */ 2071 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2072 && (device->target->bus == cdm->pos.cookie.bus) 2073 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2074 && (device->target == cdm->pos.cookie.target) 2075 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2076 && (device == cdm->pos.cookie.device) 2077 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2078 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2079 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2080 device->generation)){ 2081 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2082 return(0); 2083 } 2084 2085 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2086 && (cdm->pos.cookie.bus == device->target->bus) 2087 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2088 && (cdm->pos.cookie.target == device->target) 2089 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2090 && (cdm->pos.cookie.device == device) 2091 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2092 && (cdm->pos.cookie.periph != NULL)) 2093 return(xptperiphtraverse(device, 2094 (struct cam_periph *)cdm->pos.cookie.periph, 2095 xptedtperiphfunc, arg)); 2096 else 2097 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2098 } 2099 2100 static int 2101 xptedtperiphfunc(struct cam_periph *periph, void *arg) 2102 { 2103 struct ccb_dev_match *cdm; 2104 dev_match_ret retval; 2105 2106 cdm = (struct ccb_dev_match *)arg; 2107 2108 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2109 2110 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2111 cdm->status = CAM_DEV_MATCH_ERROR; 2112 return(0); 2113 } 2114 2115 /* 2116 * If the copy flag is set, copy this peripheral out. 
2117 */ 2118 if (retval & DM_RET_COPY) { 2119 int spaceleft, j; 2120 2121 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2122 sizeof(struct dev_match_result)); 2123 2124 /* 2125 * If we don't have enough space to put in another 2126 * match result, save our position and tell the 2127 * user there are more devices to check. 2128 */ 2129 if (spaceleft < sizeof(struct dev_match_result)) { 2130 bzero(&cdm->pos, sizeof(cdm->pos)); 2131 cdm->pos.position_type = 2132 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2133 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2134 CAM_DEV_POS_PERIPH; 2135 2136 cdm->pos.cookie.bus = periph->path->bus; 2137 cdm->pos.generations[CAM_BUS_GENERATION]= 2138 bus_generation; 2139 cdm->pos.cookie.target = periph->path->target; 2140 cdm->pos.generations[CAM_TARGET_GENERATION] = 2141 periph->path->bus->generation; 2142 cdm->pos.cookie.device = periph->path->device; 2143 cdm->pos.generations[CAM_DEV_GENERATION] = 2144 periph->path->target->generation; 2145 cdm->pos.cookie.periph = periph; 2146 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2147 periph->path->device->generation; 2148 cdm->status = CAM_DEV_MATCH_MORE; 2149 return(0); 2150 } 2151 2152 j = cdm->num_matches; 2153 cdm->num_matches++; 2154 cdm->matches[j].type = DEV_MATCH_PERIPH; 2155 cdm->matches[j].result.periph_result.path_id = 2156 periph->path->bus->path_id; 2157 cdm->matches[j].result.periph_result.target_id = 2158 periph->path->target->target_id; 2159 cdm->matches[j].result.periph_result.target_lun = 2160 periph->path->device->lun_id; 2161 cdm->matches[j].result.periph_result.unit_number = 2162 periph->unit_number; 2163 strncpy(cdm->matches[j].result.periph_result.periph_name, 2164 periph->periph_name, DEV_IDLEN); 2165 } 2166 2167 return(1); 2168 } 2169 2170 static int 2171 xptedtmatch(struct ccb_dev_match *cdm) 2172 { 2173 int ret; 2174 2175 cdm->num_matches = 0; 2176 2177 /* 2178 * Check the bus list generation. If it has changed, the user 2179 * needs to reset everything and start over. 2180 */ 2181 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2182 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2183 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2184 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2185 return(0); 2186 } 2187 2188 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2189 && (cdm->pos.cookie.bus != NULL)) 2190 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2191 xptedtbusfunc, cdm); 2192 else 2193 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2194 2195 /* 2196 * If we get back 0, that means that we had to stop before fully 2197 * traversing the EDT. It also means that one of the subroutines 2198 * has set the status field to the proper value. If we get back 1, 2199 * we've fully traversed the EDT and copied out any matching entries. 
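	 * A consumer therefore drives a complete match as a loop, reissuing
	 * the ccb while we report more data.  Minimal sketch, assuming the
	 * caller has already filled in cdm->match_buf_len, cdm->matches,
	 * any patterns, and set the func_code to XPT_DEV_MATCH:
	 *
	 *	do {
	 *		xpt_action((union ccb *)cdm);
	 *		consume cdm->matches[0 .. cdm->num_matches - 1];
	 *	} while (cdm->status == CAM_DEV_MATCH_MORE);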
2200 */ 2201 if (ret == 1) 2202 cdm->status = CAM_DEV_MATCH_LAST; 2203 2204 return(ret); 2205 } 2206 2207 static int 2208 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2209 { 2210 struct ccb_dev_match *cdm; 2211 2212 cdm = (struct ccb_dev_match *)arg; 2213 2214 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2215 && (cdm->pos.cookie.pdrv == pdrv) 2216 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2217 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2218 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2219 (*pdrv)->generation)) { 2220 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2221 return(0); 2222 } 2223 2224 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2225 && (cdm->pos.cookie.pdrv == pdrv) 2226 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2227 && (cdm->pos.cookie.periph != NULL)) 2228 return(xptpdperiphtraverse(pdrv, 2229 (struct cam_periph *)cdm->pos.cookie.periph, 2230 xptplistperiphfunc, arg)); 2231 else 2232 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2233 } 2234 2235 static int 2236 xptplistperiphfunc(struct cam_periph *periph, void *arg) 2237 { 2238 struct ccb_dev_match *cdm; 2239 dev_match_ret retval; 2240 2241 cdm = (struct ccb_dev_match *)arg; 2242 2243 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2244 2245 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2246 cdm->status = CAM_DEV_MATCH_ERROR; 2247 return(0); 2248 } 2249 2250 /* 2251 * If the copy flag is set, copy this peripheral out. 2252 */ 2253 if (retval & DM_RET_COPY) { 2254 int spaceleft, j; 2255 2256 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2257 sizeof(struct dev_match_result)); 2258 2259 /* 2260 * If we don't have enough space to put in another 2261 * match result, save our position and tell the 2262 * user there are more devices to check. 2263 */ 2264 if (spaceleft < sizeof(struct dev_match_result)) { 2265 struct periph_driver **pdrv; 2266 2267 pdrv = NULL; 2268 bzero(&cdm->pos, sizeof(cdm->pos)); 2269 cdm->pos.position_type = 2270 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2271 CAM_DEV_POS_PERIPH; 2272 2273 /* 2274 * This may look a bit non-sensical, but it is 2275 * actually quite logical. There are very few 2276 * peripheral drivers, and bloating every peripheral 2277 * structure with a pointer back to its parent 2278 * peripheral driver linker set entry would cost 2279 * more in the long run than doing this quick lookup. 2280 */ 2281 for (pdrv = 2282 (struct periph_driver **)periphdriver_set.ls_items; 2283 *pdrv != NULL; pdrv++) { 2284 if (strcmp((*pdrv)->driver_name, 2285 periph->periph_name) == 0) 2286 break; 2287 } 2288 2289 if (pdrv == NULL) { 2290 cdm->status = CAM_DEV_MATCH_ERROR; 2291 return(0); 2292 } 2293 2294 cdm->pos.cookie.pdrv = pdrv; 2295 /* 2296 * The periph generation slot does double duty, as 2297 * does the periph pointer slot. They are used for 2298 * both edt and pdrv lookups and positioning. 2299 */ 2300 cdm->pos.cookie.periph = periph; 2301 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2302 (*pdrv)->generation; 2303 cdm->status = CAM_DEV_MATCH_MORE; 2304 return(0); 2305 } 2306 2307 j = cdm->num_matches; 2308 cdm->num_matches++; 2309 cdm->matches[j].type = DEV_MATCH_PERIPH; 2310 cdm->matches[j].result.periph_result.path_id = 2311 periph->path->bus->path_id; 2312 2313 /* 2314 * The transport layer peripheral doesn't have a target or 2315 * lun. 
2316 */ 2317 if (periph->path->target) 2318 cdm->matches[j].result.periph_result.target_id = 2319 periph->path->target->target_id; 2320 else 2321 cdm->matches[j].result.periph_result.target_id = -1; 2322 2323 if (periph->path->device) 2324 cdm->matches[j].result.periph_result.target_lun = 2325 periph->path->device->lun_id; 2326 else 2327 cdm->matches[j].result.periph_result.target_lun = -1; 2328 2329 cdm->matches[j].result.periph_result.unit_number = 2330 periph->unit_number; 2331 strncpy(cdm->matches[j].result.periph_result.periph_name, 2332 periph->periph_name, DEV_IDLEN); 2333 } 2334 2335 return(1); 2336 } 2337 2338 static int 2339 xptperiphlistmatch(struct ccb_dev_match *cdm) 2340 { 2341 int ret; 2342 2343 cdm->num_matches = 0; 2344 2345 /* 2346 * At this point in the edt traversal function, we check the bus 2347 * list generation to make sure that no busses have been added or 2348 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2349 * For the peripheral driver list traversal function, however, we 2350 * don't have to worry about new peripheral driver types coming or 2351 * going; they're in a linker set, and therefore can't change 2352 * without a recompile. 2353 */ 2354 2355 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2356 && (cdm->pos.cookie.pdrv != NULL)) 2357 ret = xptpdrvtraverse( 2358 (struct periph_driver **)cdm->pos.cookie.pdrv, 2359 xptplistpdrvfunc, cdm); 2360 else 2361 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2362 2363 /* 2364 * If we get back 0, that means that we had to stop before fully 2365 * traversing the peripheral driver tree. It also means that one of 2366 * the subroutines has set the status field to the proper value. If 2367 * we get back 1, we've fully traversed the EDT and copied out any 2368 * matching entries. 2369 */ 2370 if (ret == 1) 2371 cdm->status = CAM_DEV_MATCH_LAST; 2372 2373 return(ret); 2374 } 2375 2376 static int 2377 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2378 { 2379 struct cam_eb *bus, *next_bus; 2380 int retval; 2381 2382 retval = 1; 2383 2384 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2385 bus != NULL; 2386 bus = next_bus) { 2387 next_bus = TAILQ_NEXT(bus, links); 2388 2389 retval = tr_func(bus, arg); 2390 if (retval == 0) 2391 return(retval); 2392 } 2393 2394 return(retval); 2395 } 2396 2397 static int 2398 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2399 xpt_targetfunc_t *tr_func, void *arg) 2400 { 2401 struct cam_et *target, *next_target; 2402 int retval; 2403 2404 retval = 1; 2405 for (target = (start_target ? start_target : 2406 TAILQ_FIRST(&bus->et_entries)); 2407 target != NULL; target = next_target) { 2408 2409 next_target = TAILQ_NEXT(target, links); 2410 2411 retval = tr_func(target, arg); 2412 2413 if (retval == 0) 2414 return(retval); 2415 } 2416 2417 return(retval); 2418 } 2419 2420 static int 2421 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2422 xpt_devicefunc_t *tr_func, void *arg) 2423 { 2424 struct cam_ed *device, *next_device; 2425 int retval; 2426 2427 retval = 1; 2428 for (device = (start_device ? 
start_device : 2429 TAILQ_FIRST(&target->ed_entries)); 2430 device != NULL; 2431 device = next_device) { 2432 2433 next_device = TAILQ_NEXT(device, links); 2434 2435 retval = tr_func(device, arg); 2436 2437 if (retval == 0) 2438 return(retval); 2439 } 2440 2441 return(retval); 2442 } 2443 2444 static int 2445 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2446 xpt_periphfunc_t *tr_func, void *arg) 2447 { 2448 struct cam_periph *periph, *next_periph; 2449 int retval; 2450 2451 retval = 1; 2452 2453 for (periph = (start_periph ? start_periph : 2454 SLIST_FIRST(&device->periphs)); 2455 periph != NULL; 2456 periph = next_periph) { 2457 2458 next_periph = SLIST_NEXT(periph, periph_links); 2459 2460 retval = tr_func(periph, arg); 2461 if (retval == 0) 2462 return(retval); 2463 } 2464 2465 return(retval); 2466 } 2467 2468 static int 2469 xptpdrvtraverse(struct periph_driver **start_pdrv, 2470 xpt_pdrvfunc_t *tr_func, void *arg) 2471 { 2472 struct periph_driver **pdrv; 2473 int retval; 2474 2475 retval = 1; 2476 2477 /* 2478 * We don't traverse the peripheral driver list like we do the 2479 * other lists, because it is a linker set, and therefore cannot be 2480 * changed during runtime. If the peripheral driver list is ever 2481 * re-done to be something other than a linker set (i.e. it can 2482 * change while the system is running), the list traversal should 2483 * be modified to work like the other traversal functions. 2484 */ 2485 for (pdrv = (start_pdrv ? start_pdrv : 2486 (struct periph_driver **)periphdriver_set.ls_items); 2487 *pdrv != NULL; pdrv++) { 2488 retval = tr_func(pdrv, arg); 2489 2490 if (retval == 0) 2491 return(retval); 2492 } 2493 2494 return(retval); 2495 } 2496 2497 static int 2498 xptpdperiphtraverse(struct periph_driver **pdrv, 2499 struct cam_periph *start_periph, 2500 xpt_periphfunc_t *tr_func, void *arg) 2501 { 2502 struct cam_periph *periph, *next_periph; 2503 int retval; 2504 2505 retval = 1; 2506 2507 for (periph = (start_periph ? 
start_periph : 2508 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2509 periph = next_periph) { 2510 2511 next_periph = TAILQ_NEXT(periph, unit_links); 2512 2513 retval = tr_func(periph, arg); 2514 if (retval == 0) 2515 return(retval); 2516 } 2517 return(retval); 2518 } 2519 2520 static int 2521 xptdefbusfunc(struct cam_eb *bus, void *arg) 2522 { 2523 struct xpt_traverse_config *tr_config; 2524 2525 tr_config = (struct xpt_traverse_config *)arg; 2526 2527 if (tr_config->depth == XPT_DEPTH_BUS) { 2528 xpt_busfunc_t *tr_func; 2529 2530 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2531 2532 return(tr_func(bus, tr_config->tr_arg)); 2533 } else 2534 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2535 } 2536 2537 static int 2538 xptdeftargetfunc(struct cam_et *target, void *arg) 2539 { 2540 struct xpt_traverse_config *tr_config; 2541 2542 tr_config = (struct xpt_traverse_config *)arg; 2543 2544 if (tr_config->depth == XPT_DEPTH_TARGET) { 2545 xpt_targetfunc_t *tr_func; 2546 2547 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2548 2549 return(tr_func(target, tr_config->tr_arg)); 2550 } else 2551 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2552 } 2553 2554 static int 2555 xptdefdevicefunc(struct cam_ed *device, void *arg) 2556 { 2557 struct xpt_traverse_config *tr_config; 2558 2559 tr_config = (struct xpt_traverse_config *)arg; 2560 2561 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2562 xpt_devicefunc_t *tr_func; 2563 2564 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2565 2566 return(tr_func(device, tr_config->tr_arg)); 2567 } else 2568 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2569 } 2570 2571 static int 2572 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2573 { 2574 struct xpt_traverse_config *tr_config; 2575 xpt_periphfunc_t *tr_func; 2576 2577 tr_config = (struct xpt_traverse_config *)arg; 2578 2579 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2580 2581 /* 2582 * Unlike the other default functions, we don't check for depth 2583 * here. The peripheral driver level is the last level in the EDT, 2584 * so if we're here, we should execute the function in question. 2585 */ 2586 return(tr_func(periph, tr_config->tr_arg)); 2587 } 2588 2589 /* 2590 * Execute the given function for every bus in the EDT. 2591 */ 2592 static int 2593 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2594 { 2595 struct xpt_traverse_config tr_config; 2596 2597 tr_config.depth = XPT_DEPTH_BUS; 2598 tr_config.tr_func = tr_func; 2599 tr_config.tr_arg = arg; 2600 2601 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2602 } 2603 2604 #ifdef notusedyet 2605 /* 2606 * Execute the given function for every target in the EDT. 2607 */ 2608 static int 2609 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2610 { 2611 struct xpt_traverse_config tr_config; 2612 2613 tr_config.depth = XPT_DEPTH_TARGET; 2614 tr_config.tr_func = tr_func; 2615 tr_config.tr_arg = arg; 2616 2617 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2618 } 2619 #endif /* notusedyet */ 2620 2621 /* 2622 * Execute the given function for every device in the EDT. 
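 *
 * The callback contract is the same as for the traversal functions above:
 * return 1 to continue the walk, 0 to stop it early.  A hypothetical
 * counting callback (illustrative only, not part of this file):
 *
 *	static int
 *	xptcountdevfunc(struct cam_ed *device, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (1);	(keep traversing)
 *	}
 *
 * which would be run over the whole EDT with
 * xpt_for_all_devices(xptcountdevfunc, &ndevs).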
2623 */ 2624 static int 2625 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2626 { 2627 struct xpt_traverse_config tr_config; 2628 2629 tr_config.depth = XPT_DEPTH_DEVICE; 2630 tr_config.tr_func = tr_func; 2631 tr_config.tr_arg = arg; 2632 2633 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2634 } 2635 2636 #ifdef notusedyet 2637 /* 2638 * Execute the given function for every peripheral in the EDT. 2639 */ 2640 static int 2641 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2642 { 2643 struct xpt_traverse_config tr_config; 2644 2645 tr_config.depth = XPT_DEPTH_PERIPH; 2646 tr_config.tr_func = tr_func; 2647 tr_config.tr_arg = arg; 2648 2649 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2650 } 2651 #endif /* notusedyet */ 2652 2653 static int 2654 xptsetasyncfunc(struct cam_ed *device, void *arg) 2655 { 2656 struct cam_path path; 2657 struct ccb_getdev cgd; 2658 struct async_node *cur_entry; 2659 2660 cur_entry = (struct async_node *)arg; 2661 2662 /* 2663 * Don't report unconfigured devices (Wildcard devs, 2664 * devices only for target mode, device instances 2665 * that have been invalidated but are waiting for 2666 * their last reference count to be released). 2667 */ 2668 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2669 return (1); 2670 2671 xpt_compile_path(&path, 2672 NULL, 2673 device->target->bus->path_id, 2674 device->target->target_id, 2675 device->lun_id); 2676 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2677 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2678 xpt_action((union ccb *)&cgd); 2679 cur_entry->callback(cur_entry->callback_arg, 2680 AC_FOUND_DEVICE, 2681 &path, &cgd); 2682 xpt_release_path(&path); 2683 2684 return(1); 2685 } 2686 2687 static int 2688 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2689 { 2690 struct cam_path path; 2691 struct ccb_pathinq cpi; 2692 struct async_node *cur_entry; 2693 2694 cur_entry = (struct async_node *)arg; 2695 2696 xpt_compile_path(&path, /*periph*/NULL, 2697 bus->sim->path_id, 2698 CAM_TARGET_WILDCARD, 2699 CAM_LUN_WILDCARD); 2700 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2701 cpi.ccb_h.func_code = XPT_PATH_INQ; 2702 xpt_action((union ccb *)&cpi); 2703 cur_entry->callback(cur_entry->callback_arg, 2704 AC_PATH_REGISTERED, 2705 &path, &cpi); 2706 xpt_release_path(&path); 2707 2708 return(1); 2709 } 2710 2711 void 2712 xpt_action(union ccb *start_ccb) 2713 { 2714 int iopl; 2715 2716 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2717 2718 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2719 2720 iopl = splsoftcam(); 2721 switch (start_ccb->ccb_h.func_code) { 2722 case XPT_SCSI_IO: 2723 { 2724 #ifdef CAMDEBUG 2725 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2726 struct cam_path *path; 2727 2728 path = start_ccb->ccb_h.path; 2729 #endif 2730 2731 /* 2732 * For the sake of compatibility with SCSI-1 2733 * devices that may not understand the identify 2734 * message, we include lun information in the 2735 * second byte of all commands. SCSI-1 specifies 2736 * that luns are a 3 bit value and reserves only 3 2737 * bits for lun information in the CDB. Later 2738 * revisions of the SCSI spec allow for more than 8 2739 * luns, but have deprecated lun information in the 2740 * CDB. So, if the lun won't fit, we must omit. 2741 * 2742 * Also be aware that during initial probing for devices, 2743 * the inquiry information is unknown but initialized to 0. 2744 * This means that this code will be exercised while probing 2745 * devices with an ANSI revision greater than 2. 
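		 *
		 * (For reference, the SCSI-1 byte layout this implements is:
		 *
		 *	CDB byte 1:  bits 7-5 = lun, bits 4-0 = command specific
		 *
		 * so for lun 2 the statement below amounts to
		 * cdb_bytes[1] |= (2 << 5), i.e. setting 0x40.)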
2746 		 */
2747 		if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2
2748 		 && start_ccb->ccb_h.target_lun < 8
2749 		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2750
2751 			start_ccb->csio.cdb_io.cdb_bytes[1] |=
2752 			    start_ccb->ccb_h.target_lun << 5;
2753 		}
2754 		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2755 		CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
2756 			  scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
2757 				       &path->device->inq_data),
2758 			  scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
2759 					  cdb_str, sizeof(cdb_str))));
2760 		/* FALLTHROUGH */
2761 	}
2762 	case XPT_TARGET_IO:
2763 	case XPT_CONT_TARGET_IO:
2764 		start_ccb->csio.sense_resid = 0;
2765 		start_ccb->csio.resid = 0;
2766 		/* FALLTHROUGH */
2767 	case XPT_RESET_DEV:
2768 	case XPT_ENG_EXEC:
2769 	{
2770 		struct cam_path *path;
2771 		int s;
2772 		int runq;
2773
2774 		path = start_ccb->ccb_h.path;
2775 		s = splsoftcam();
2776
2777 		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2778 		if (path->device->qfrozen_cnt == 0)
2779 			runq = xpt_schedule_dev_sendq(path->bus, path->device);
2780 		else
2781 			runq = 0;
2782 		splx(s);
2783 		if (runq != 0)
2784 			xpt_run_dev_sendq(path->bus);
2785 		break;
2786 	}
2787 	case XPT_SET_TRAN_SETTINGS:
2788 	{
2789 		xpt_set_transfer_settings(&start_ccb->cts,
2790 					  start_ccb->ccb_h.path->device,
2791 					  /*async_update*/FALSE);
2792 		break;
2793 	}
2794 	case XPT_CALC_GEOMETRY:
2795 	{
2796 		struct cam_sim *sim;
2797
2798 		/* Filter out garbage */
2799 		if (start_ccb->ccg.block_size == 0
2800 		 || start_ccb->ccg.volume_size == 0) {
2801 			start_ccb->ccg.cylinders = 0;
2802 			start_ccb->ccg.heads = 0;
2803 			start_ccb->ccg.secs_per_track = 0;
2804 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2805 			break;
2806 		}
2807 #ifdef PC98
2808 		/*
2809 		 * In a PC-98 system, geometry translation depends on
2810 		 * the "real" device geometry obtained from mode page 4.
2811 		 * SCSI geometry translation is performed in the
2812 		 * initialization routine of the SCSI BIOS and the result
2813 		 * stored in host memory.  If the translation is available
2814 		 * in host memory, use it.  If not, rely on the default
2815 		 * translation the device driver performs.
2816 		 */
2817 		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2818 			start_ccb->ccb_h.status = CAM_REQ_CMP;
2819 			break;
2820 		}
2821 #endif
2822 		sim = start_ccb->ccb_h.path->bus->sim;
2823 		(*(sim->sim_action))(sim, start_ccb);
2824 		break;
2825 	}
2826 	case XPT_ABORT:
2827 	{
2828 		union ccb* abort_ccb;
2829 		int s;
2830
2831 		abort_ccb = start_ccb->cab.abort_ccb;
2832 		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2833
2834 			if (abort_ccb->ccb_h.pinfo.index >= 0) {
2835 				struct cam_ccbq *ccbq;
2836
2837 				ccbq = &abort_ccb->ccb_h.path->device->ccbq;
2838 				cam_ccbq_remove_ccb(ccbq, abort_ccb);
2839 				abort_ccb->ccb_h.status =
2840 				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2841 				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2842 				s = splcam();
2843 				xpt_done(abort_ccb);
2844 				splx(s);
2845 				start_ccb->ccb_h.status = CAM_REQ_CMP;
2846 				break;
2847 			}
2848 			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2849 			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2850 				/*
2851 				 * We've caught this ccb en route to
2852 				 * the SIM.  Flag it for abort and the
2853 				 * SIM will do so just before starting
2854 				 * real work on the CCB.
2855 */ 2856 abort_ccb->ccb_h.status = 2857 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2858 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2859 start_ccb->ccb_h.status = CAM_REQ_CMP; 2860 break; 2861 } 2862 } 2863 if (XPT_FC_IS_QUEUED(abort_ccb) 2864 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2865 /* 2866 * It's already completed but waiting 2867 * for our SWI to get to it. 2868 */ 2869 start_ccb->ccb_h.status = CAM_UA_ABORT; 2870 break; 2871 } 2872 /* 2873 * If we weren't able to take care of the abort request 2874 * in the XPT, pass the request down to the SIM for processing. 2875 */ 2876 /* FALLTHROUGH */ 2877 } 2878 case XPT_ACCEPT_TARGET_IO: 2879 case XPT_EN_LUN: 2880 case XPT_IMMED_NOTIFY: 2881 case XPT_NOTIFY_ACK: 2882 case XPT_GET_TRAN_SETTINGS: 2883 case XPT_RESET_BUS: 2884 { 2885 struct cam_sim *sim; 2886 2887 sim = start_ccb->ccb_h.path->bus->sim; 2888 (*(sim->sim_action))(sim, start_ccb); 2889 break; 2890 } 2891 case XPT_PATH_INQ: 2892 { 2893 struct cam_sim *sim; 2894 2895 sim = start_ccb->ccb_h.path->bus->sim; 2896 (*(sim->sim_action))(sim, start_ccb); 2897 break; 2898 } 2899 case XPT_PATH_STATS: 2900 start_ccb->cpis.last_reset = 2901 start_ccb->ccb_h.path->bus->last_reset; 2902 start_ccb->ccb_h.status = CAM_REQ_CMP; 2903 break; 2904 case XPT_GDEV_TYPE: 2905 { 2906 struct cam_ed *dev; 2907 int s; 2908 2909 dev = start_ccb->ccb_h.path->device; 2910 s = splcam(); 2911 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2912 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2913 } else { 2914 struct ccb_getdev *cgd; 2915 struct cam_eb *bus; 2916 struct cam_et *tar; 2917 2918 cgd = &start_ccb->cgd; 2919 bus = cgd->ccb_h.path->bus; 2920 tar = cgd->ccb_h.path->target; 2921 cgd->inq_data = dev->inq_data; 2922 cgd->pd_type = SID_TYPE(&dev->inq_data); 2923 #ifndef GARBAGE_COLLECT 2924 cgd->dev_openings = dev->ccbq.dev_openings; 2925 cgd->dev_active = dev->ccbq.dev_active; 2926 cgd->devq_openings = dev->ccbq.devq_openings; 2927 cgd->devq_queued = dev->ccbq.queue.entries; 2928 cgd->held = dev->ccbq.held; 2929 cgd->maxtags = dev->quirk->maxtags; 2930 cgd->mintags = dev->quirk->mintags; 2931 #endif 2932 cgd->ccb_h.status = CAM_REQ_CMP; 2933 cgd->serial_num_len = dev->serial_num_len; 2934 if ((dev->serial_num_len > 0) 2935 && (dev->serial_num != NULL)) 2936 bcopy(dev->serial_num, cgd->serial_num, 2937 dev->serial_num_len); 2938 } 2939 splx(s); 2940 break; 2941 } 2942 case XPT_GDEV_STATS: 2943 { 2944 struct cam_ed *dev; 2945 int s; 2946 2947 dev = start_ccb->ccb_h.path->device; 2948 s = splcam(); 2949 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2950 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2951 } else { 2952 struct ccb_getdevstats *cgds; 2953 struct cam_eb *bus; 2954 struct cam_et *tar; 2955 2956 cgds = &start_ccb->cgds; 2957 bus = cgds->ccb_h.path->bus; 2958 tar = cgds->ccb_h.path->target; 2959 cgds->dev_openings = dev->ccbq.dev_openings; 2960 cgds->dev_active = dev->ccbq.dev_active; 2961 cgds->devq_openings = dev->ccbq.devq_openings; 2962 cgds->devq_queued = dev->ccbq.queue.entries; 2963 cgds->held = dev->ccbq.held; 2964 cgds->last_reset = tar->last_reset; 2965 cgds->maxtags = dev->quirk->maxtags; 2966 cgds->mintags = dev->quirk->mintags; 2967 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2968 cgds->last_reset = bus->last_reset; 2969 cgds->ccb_h.status = CAM_REQ_CMP; 2970 } 2971 splx(s); 2972 break; 2973 } 2974 case XPT_GDEVLIST: 2975 { 2976 struct cam_periph *nperiph; 2977 struct periph_list *periph_head; 2978 struct ccb_getdevlist *cgdl; 2979 int i; 2980 int s; 2981 struct cam_ed 
*device; 2982 int found; 2983 2984 2985 found = 0; 2986 2987 /* 2988 * Don't want anyone mucking with our data. 2989 */ 2990 s = splcam(); 2991 device = start_ccb->ccb_h.path->device; 2992 periph_head = &device->periphs; 2993 cgdl = &start_ccb->cgdl; 2994 2995 /* 2996 * Check and see if the list has changed since the user 2997 * last requested a list member. If so, tell them that the 2998 * list has changed, and therefore they need to start over 2999 * from the beginning. 3000 */ 3001 if ((cgdl->index != 0) && 3002 (cgdl->generation != device->generation)) { 3003 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 3004 splx(s); 3005 break; 3006 } 3007 3008 /* 3009 * Traverse the list of peripherals and attempt to find 3010 * the requested peripheral. 3011 */ 3012 for (nperiph = periph_head->slh_first, i = 0; 3013 (nperiph != NULL) && (i <= cgdl->index); 3014 nperiph = nperiph->periph_links.sle_next, i++) { 3015 if (i == cgdl->index) { 3016 strncpy(cgdl->periph_name, 3017 nperiph->periph_name, 3018 DEV_IDLEN); 3019 cgdl->unit_number = nperiph->unit_number; 3020 found = 1; 3021 } 3022 } 3023 if (found == 0) { 3024 cgdl->status = CAM_GDEVLIST_ERROR; 3025 splx(s); 3026 break; 3027 } 3028 3029 if (nperiph == NULL) 3030 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3031 else 3032 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3033 3034 cgdl->index++; 3035 cgdl->generation = device->generation; 3036 3037 splx(s); 3038 cgdl->ccb_h.status = CAM_REQ_CMP; 3039 break; 3040 } 3041 case XPT_DEV_MATCH: 3042 { 3043 int s; 3044 dev_pos_type position_type; 3045 struct ccb_dev_match *cdm; 3046 int ret; 3047 3048 cdm = &start_ccb->cdm; 3049 3050 /* 3051 * Prevent EDT changes while we traverse it. 3052 */ 3053 s = splcam(); 3054 /* 3055 * There are two ways of getting at information in the EDT. 3056 * The first way is via the primary EDT tree. It starts 3057 * with a list of busses, then a list of targets on a bus, 3058 * then devices/luns on a target, and then peripherals on a 3059 * device/lun. The "other" way is by the peripheral driver 3060 * lists. The peripheral driver lists are organized by 3061 * peripheral driver. (obviously) So it makes sense to 3062 * use the peripheral driver list if the user is looking 3063 * for something like "da1", or all "da" devices. If the 3064 * user is looking for something on a particular bus/target 3065 * or lun, it's generally better to go through the EDT tree. 
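		 *
		 * For example (sketch, using the match pattern fields
		 * defined earlier in this file), a name match such as
		 *
		 *	pattern.type = DEV_MATCH_PERIPH;
		 *	pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
		 *	strncpy(pattern.pattern.periph_pattern.periph_name,
		 *		"da", DEV_IDLEN);
		 *
		 * is best served by the peripheral driver lists, while a
		 * pattern wired to a path_id/target_id/target_lun triple is
		 * best served by the EDT tree.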
3066 */ 3067 3068 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3069 position_type = cdm->pos.position_type; 3070 else { 3071 int i; 3072 3073 position_type = CAM_DEV_POS_NONE; 3074 3075 for (i = 0; i < cdm->num_patterns; i++) { 3076 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3077 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3078 position_type = CAM_DEV_POS_EDT; 3079 break; 3080 } 3081 } 3082 3083 if (cdm->num_patterns == 0) 3084 position_type = CAM_DEV_POS_EDT; 3085 else if (position_type == CAM_DEV_POS_NONE) 3086 position_type = CAM_DEV_POS_PDRV; 3087 } 3088 3089 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3090 case CAM_DEV_POS_EDT: 3091 ret = xptedtmatch(cdm); 3092 break; 3093 case CAM_DEV_POS_PDRV: 3094 ret = xptperiphlistmatch(cdm); 3095 break; 3096 default: 3097 cdm->status = CAM_DEV_MATCH_ERROR; 3098 break; 3099 } 3100 3101 splx(s); 3102 3103 if (cdm->status == CAM_DEV_MATCH_ERROR) 3104 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3105 else 3106 start_ccb->ccb_h.status = CAM_REQ_CMP; 3107 3108 break; 3109 } 3110 case XPT_SASYNC_CB: 3111 { 3112 struct ccb_setasync *csa; 3113 struct async_node *cur_entry; 3114 struct async_list *async_head; 3115 u_int32_t added; 3116 int s; 3117 3118 csa = &start_ccb->csa; 3119 added = csa->event_enable; 3120 async_head = &csa->ccb_h.path->device->asyncs; 3121 3122 /* 3123 * If there is already an entry for us, simply 3124 * update it. 3125 */ 3126 s = splcam(); 3127 cur_entry = SLIST_FIRST(async_head); 3128 while (cur_entry != NULL) { 3129 if ((cur_entry->callback_arg == csa->callback_arg) 3130 && (cur_entry->callback == csa->callback)) 3131 break; 3132 cur_entry = SLIST_NEXT(cur_entry, links); 3133 } 3134 3135 if (cur_entry != NULL) { 3136 /* 3137 * If the request has no flags set, 3138 * remove the entry. 3139 */ 3140 added &= ~cur_entry->event_enable; 3141 if (csa->event_enable == 0) { 3142 SLIST_REMOVE(async_head, cur_entry, 3143 async_node, links); 3144 csa->ccb_h.path->device->refcount--; 3145 free(cur_entry, M_DEVBUF); 3146 } else { 3147 cur_entry->event_enable = csa->event_enable; 3148 } 3149 } else { 3150 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 3151 M_NOWAIT); 3152 if (cur_entry == NULL) { 3153 splx(s); 3154 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 3155 break; 3156 } 3157 cur_entry->callback_arg = csa->callback_arg; 3158 cur_entry->callback = csa->callback; 3159 cur_entry->event_enable = csa->event_enable; 3160 SLIST_INSERT_HEAD(async_head, cur_entry, links); 3161 csa->ccb_h.path->device->refcount++; 3162 } 3163 3164 if ((added & AC_FOUND_DEVICE) != 0) { 3165 /* 3166 * Get this peripheral up to date with all 3167 * the currently existing devices. 3168 */ 3169 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 3170 } 3171 if ((added & AC_PATH_REGISTERED) != 0) { 3172 /* 3173 * Get this peripheral up to date with all 3174 * the currently existing busses. 
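			 *
			 * (For reference, a peripheral driver typically signs
			 * up for these notifications with a ccb along these
			 * lines, where mycallback stands in for the driver's
			 * handler; a sketch, not code from this file:
			 *
			 *	struct ccb_setasync csa;
			 *
			 *	xpt_setup_ccb(&csa.ccb_h, path, 5);
			 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
			 *	csa.event_enable = AC_FOUND_DEVICE;
			 *	csa.callback = mycallback;
			 *	csa.callback_arg = NULL;
			 *	xpt_action((union ccb *)&csa);)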
3175 */ 3176 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 3177 } 3178 splx(s); 3179 start_ccb->ccb_h.status = CAM_REQ_CMP; 3180 break; 3181 } 3182 case XPT_REL_SIMQ: 3183 { 3184 struct ccb_relsim *crs; 3185 struct cam_ed *dev; 3186 int s; 3187 3188 crs = &start_ccb->crs; 3189 dev = crs->ccb_h.path->device; 3190 if (dev == NULL) { 3191 3192 crs->ccb_h.status = CAM_DEV_NOT_THERE; 3193 break; 3194 } 3195 3196 s = splcam(); 3197 3198 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 3199 3200 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 3201 3202 /* Don't ever go below one opening */ 3203 if (crs->openings > 0) { 3204 xpt_dev_ccbq_resize(crs->ccb_h.path, 3205 crs->openings); 3206 3207 if (bootverbose) { 3208 xpt_print_path(crs->ccb_h.path); 3209 printf("tagged openings " 3210 "now %d\n", 3211 crs->openings); 3212 } 3213 } 3214 } 3215 } 3216 3217 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 3218 3219 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 3220 3221 /* 3222 * Just extend the old timeout and decrement 3223 * the freeze count so that a single timeout 3224 * is sufficient for releasing the queue. 3225 */ 3226 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3227 untimeout(xpt_release_devq_timeout, 3228 dev, dev->c_handle); 3229 } else { 3230 3231 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3232 } 3233 3234 dev->c_handle = 3235 timeout(xpt_release_devq_timeout, 3236 dev, 3237 (crs->release_timeout * hz) / 1000); 3238 3239 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 3240 3241 } 3242 3243 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 3244 3245 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 3246 /* 3247 * Decrement the freeze count so that a single 3248 * completion is still sufficient to unfreeze 3249 * the queue. 
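				 *
				 * (Worked example: if an earlier request
				 * already set CAM_DEV_REL_ON_COMPLETE, the
				 * queue holds one freeze count for it.
				 * Clearing CAM_DEV_QFREEZE here makes the
				 * xpt_release_devq() call at the bottom of
				 * this case drop this request's count right
				 * away, instead of stacking a second count
				 * that no single completion would release.)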
3250 */ 3251 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3252 } else { 3253 3254 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 3255 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3256 } 3257 } 3258 3259 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 3260 3261 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3262 || (dev->ccbq.dev_active == 0)) { 3263 3264 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3265 } else { 3266 3267 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3268 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3269 } 3270 } 3271 splx(s); 3272 3273 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3274 3275 xpt_release_devq(crs->ccb_h.path, /*count*/1, 3276 /*run_queue*/TRUE); 3277 } 3278 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3279 start_ccb->ccb_h.status = CAM_REQ_CMP; 3280 break; 3281 } 3282 case XPT_SCAN_BUS: 3283 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3284 break; 3285 case XPT_SCAN_LUN: 3286 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3287 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3288 start_ccb); 3289 break; 3290 case XPT_DEBUG: { 3291 #ifdef CAMDEBUG 3292 int s; 3293 3294 s = splcam(); 3295 #ifdef CAM_DEBUG_DELAY 3296 cam_debug_delay = CAM_DEBUG_DELAY; 3297 #endif 3298 cam_dflags = start_ccb->cdbg.flags; 3299 if (cam_dpath != NULL) { 3300 xpt_free_path(cam_dpath); 3301 cam_dpath = NULL; 3302 } 3303 3304 if (cam_dflags != CAM_DEBUG_NONE) { 3305 if (xpt_create_path(&cam_dpath, xpt_periph, 3306 start_ccb->ccb_h.path_id, 3307 start_ccb->ccb_h.target_id, 3308 start_ccb->ccb_h.target_lun) != 3309 CAM_REQ_CMP) { 3310 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3311 cam_dflags = CAM_DEBUG_NONE; 3312 } else { 3313 start_ccb->ccb_h.status = CAM_REQ_CMP; 3314 xpt_print_path(cam_dpath); 3315 printf("debugging flags now %x\n", cam_dflags); 3316 } 3317 } else { 3318 cam_dpath = NULL; 3319 start_ccb->ccb_h.status = CAM_REQ_CMP; 3320 } 3321 splx(s); 3322 #else /* !CAMDEBUG */ 3323 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3324 #endif /* CAMDEBUG */ 3325 break; 3326 } 3327 case XPT_NOOP: 3328 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3329 xpt_freeze_devq(start_ccb->ccb_h.path, 1); 3330 start_ccb->ccb_h.status = CAM_REQ_CMP; 3331 break; 3332 default: 3333 case XPT_SDEV_TYPE: 3334 case XPT_TERM_IO: 3335 case XPT_ENG_INQ: 3336 /* XXX Implement */ 3337 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3338 break; 3339 } 3340 splx(iopl); 3341 } 3342 3343 void 3344 xpt_polled_action(union ccb *start_ccb) 3345 { 3346 int s; 3347 u_int32_t timeout; 3348 struct cam_sim *sim; 3349 struct cam_devq *devq; 3350 struct cam_ed *dev; 3351 3352 timeout = start_ccb->ccb_h.timeout; 3353 sim = start_ccb->ccb_h.path->bus->sim; 3354 devq = sim->devq; 3355 dev = start_ccb->ccb_h.path->device; 3356 3357 s = splcam(); 3358 3359 /* 3360 * Steal an opening so that no other queued requests 3361 * can get it before us while we simulate interrupts. 
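	 *
	 * (Note on units: both loops below DELAY(1000), i.e. one
	 * millisecond, per poll, so the ccb_h.timeout value, which CAM
	 * expresses in milliseconds, doubles as the iteration count; e.g.
	 * a timeout of 5000 allows roughly five seconds of polling.)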
3362 	 */
3363 	dev->ccbq.devq_openings--;
3364 	dev->ccbq.dev_openings--;
3365
3366 	while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0)
3367 	   && (--timeout > 0)) {
3368 		DELAY(1000);
3369 		(*(sim->sim_poll))(sim);
3370 		swi_camnet();
3371 		swi_cambio();
3372 	}
3373
3374 	dev->ccbq.devq_openings++;
3375 	dev->ccbq.dev_openings++;
3376
3377 	if (timeout != 0) {
3378 		xpt_action(start_ccb);
3379 		while(--timeout > 0) {
3380 			(*(sim->sim_poll))(sim);
3381 			swi_camnet();
3382 			swi_cambio();
3383 			if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3384 			    != CAM_REQ_INPROG)
3385 				break;
3386 			DELAY(1000);
3387 		}
3388 		if (timeout == 0) {
3389 			/*
3390 			 * XXX Is it worth adding a sim_timeout entry
3391 			 * point so we can attempt recovery?  If
3392 			 * this is only used for dumps, I don't think
3393 			 * it is.
3394 			 */
3395 			start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3396 		}
3397 	} else {
3398 		start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3399 	}
3400 	splx(s);
3401 }
3402
3403 /*
3404  * Schedule a peripheral driver to receive a ccb when its
3405  * target device has space for more transactions.
3406  */
3407 void
3408 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3409 {
3410 	struct cam_ed *device;
3411 	int s;
3412 	int runq;
3413
3414 	CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3415 	device = perph->path->device;
3416 	s = splsoftcam();
3417 	if (periph_is_queued(perph)) {
3418 		/* Simply reorder based on new priority */
3419 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3420 			  (" change priority to %d\n", new_priority));
3421 		if (new_priority < perph->pinfo.priority) {
3422 			camq_change_priority(&device->drvq,
3423 					     perph->pinfo.index,
3424 					     new_priority);
3425 		}
3426 		runq = 0;
3427 	} else {
3428 		/* New entry on the queue */
3429 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3430 			  (" added periph to queue\n"));
3431 		perph->pinfo.priority = new_priority;
3432 		perph->pinfo.generation = ++device->drvq.generation;
3433 		camq_insert(&device->drvq, &perph->pinfo);
3434 		runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3435 	}
3436 	splx(s);
3437 	if (runq != 0) {
3438 		CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3439 			  (" calling xpt_run_devq\n"));
3440 		xpt_run_dev_allocq(perph->path->bus);
3441 	}
3442 }
3443
3444
3445 /*
3446  * Schedule a device to run on a given queue.
3447  * If the device was inserted as a new entry on the queue,
3448  * return 1 meaning the device queue should be run.  If we
3449  * were already queued, implying someone else has already
3450  * started the queue, return 0 so the caller doesn't attempt
3451  * to run the queue.  Must be run at either splsoftcam
3452  * or splcam (splcam encompasses splsoftcam).
3453  */
3454 static int
3455 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3456 		 u_int32_t new_priority)
3457 {
3458 	int retval;
3459 	u_int32_t old_priority;
3460
3461 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3462
3463 	old_priority = pinfo->priority;
3464
3465 	/*
3466 	 * Are we already queued?
3467 */ 3468 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3469 /* Simply reorder based on new priority */ 3470 if (new_priority < old_priority) { 3471 camq_change_priority(queue, pinfo->index, 3472 new_priority); 3473 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3474 ("changed priority to %d\n", 3475 new_priority)); 3476 } 3477 retval = 0; 3478 } else { 3479 /* New entry on the queue */ 3480 if (new_priority < old_priority) 3481 pinfo->priority = new_priority; 3482 3483 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3484 ("Inserting onto queue\n")); 3485 pinfo->generation = ++queue->generation; 3486 camq_insert(queue, pinfo); 3487 retval = 1; 3488 } 3489 return (retval); 3490 } 3491 3492 static void 3493 xpt_run_dev_allocq(struct cam_eb *bus) 3494 { 3495 struct cam_devq *devq; 3496 int s; 3497 3498 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3499 devq = bus->sim->devq; 3500 3501 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3502 (" qfrozen_cnt == 0x%x, entries == %d, " 3503 "openings == %d, active == %d\n", 3504 devq->alloc_queue.qfrozen_cnt, 3505 devq->alloc_queue.entries, 3506 devq->alloc_openings, 3507 devq->alloc_active)); 3508 3509 s = splsoftcam(); 3510 devq->alloc_queue.qfrozen_cnt++; 3511 while ((devq->alloc_queue.entries > 0) 3512 && (devq->alloc_openings > 0) 3513 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3514 struct cam_ed_qinfo *qinfo; 3515 struct cam_ed *device; 3516 union ccb *work_ccb; 3517 struct cam_periph *drv; 3518 struct camq *drvq; 3519 3520 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3521 CAMQ_HEAD); 3522 device = qinfo->device; 3523 3524 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3525 ("running device %p\n", device)); 3526 3527 drvq = &device->drvq; 3528 3529 #ifdef CAMDEBUG 3530 if (drvq->entries <= 0) { 3531 panic("xpt_run_dev_allocq: " 3532 "Device on queue without any work to do"); 3533 } 3534 #endif 3535 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3536 devq->alloc_openings--; 3537 devq->alloc_active++; 3538 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 3539 splx(s); 3540 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3541 drv->pinfo.priority); 3542 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3543 ("calling periph start\n")); 3544 drv->periph_start(drv, work_ccb); 3545 } else { 3546 /* 3547 * Malloc failure in alloc_ccb 3548 */ 3549 /* 3550 * XXX add us to a list to be run from free_ccb 3551 * if we don't have any ccbs active on this 3552 * device queue otherwise we may never get run 3553 * again. 3554 */ 3555 break; 3556 } 3557 3558 /* Raise IPL for possible insertion and test at top of loop */ 3559 s = splsoftcam(); 3560 3561 if (drvq->entries > 0) { 3562 /* We have more work. 
Attempt to reschedule */ 3563 xpt_schedule_dev_allocq(bus, device); 3564 } 3565 } 3566 devq->alloc_queue.qfrozen_cnt--; 3567 splx(s); 3568 } 3569 3570 static void 3571 xpt_run_dev_sendq(struct cam_eb *bus) 3572 { 3573 struct cam_devq *devq; 3574 int s; 3575 3576 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3577 3578 devq = bus->sim->devq; 3579 3580 s = splcam(); 3581 devq->send_queue.qfrozen_cnt++; 3582 splx(s); 3583 s = splsoftcam(); 3584 while ((devq->send_queue.entries > 0) 3585 && (devq->send_openings > 0)) { 3586 struct cam_ed_qinfo *qinfo; 3587 struct cam_ed *device; 3588 union ccb *work_ccb; 3589 struct cam_sim *sim; 3590 int ospl; 3591 3592 ospl = splcam(); 3593 if (devq->send_queue.qfrozen_cnt > 1) { 3594 splx(ospl); 3595 break; 3596 } 3597 3598 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3599 CAMQ_HEAD); 3600 device = qinfo->device; 3601 3602 /* 3603 * If the device has been "frozen", don't attempt 3604 * to run it. 3605 */ 3606 if (device->qfrozen_cnt > 0) { 3607 splx(ospl); 3608 continue; 3609 } 3610 3611 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3612 ("running device %p\n", device)); 3613 3614 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3615 if (work_ccb == NULL) { 3616 printf("device on run queue with no ccbs???"); 3617 splx(ospl); 3618 continue; 3619 } 3620 3621 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3622 3623 if (num_highpower <= 0) { 3624 /* 3625 * We got a high power command, but we 3626 * don't have any available slots. Freeze 3627 * the device queue until we have a slot 3628 * available. 3629 */ 3630 device->qfrozen_cnt++; 3631 STAILQ_INSERT_TAIL(&highpowerq, 3632 &work_ccb->ccb_h, 3633 xpt_links.stqe); 3634 3635 splx(ospl); 3636 continue; 3637 } else { 3638 /* 3639 * Consume a high power slot while 3640 * this ccb runs. 3641 */ 3642 num_highpower--; 3643 } 3644 } 3645 devq->active_dev = device; 3646 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3647 3648 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3649 splx(ospl); 3650 3651 devq->send_openings--; 3652 devq->send_active++; 3653 3654 if (device->ccbq.queue.entries > 0) 3655 xpt_schedule_dev_sendq(bus, device); 3656 3657 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3658 /* 3659 * The client wants to freeze the queue 3660 * after this CCB is sent. 3661 */ 3662 ospl = splcam(); 3663 device->qfrozen_cnt++; 3664 splx(ospl); 3665 } 3666 3667 splx(s); 3668 3669 if ((device->inq_flags & SID_CmdQue) != 0) 3670 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3671 else 3672 /* 3673 * Clear this in case of a retried CCB that failed 3674 * due to a rejected tag. 3675 */ 3676 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3677 3678 /* 3679 * Device queues can be shared among multiple sim instances 3680 * that reside on different busses. Use the SIM in the queue 3681 * CCB's path, rather than the one in the bus that was passed 3682 * into this function. 3683 */ 3684 sim = work_ccb->ccb_h.path->bus->sim; 3685 (*(sim->sim_action))(sim, work_ccb); 3686 3687 ospl = splcam(); 3688 devq->active_dev = NULL; 3689 splx(ospl); 3690 /* Raise IPL for possible insertion and test at top of loop */ 3691 s = splsoftcam(); 3692 } 3693 splx(s); 3694 s = splcam(); 3695 devq->send_queue.qfrozen_cnt--; 3696 splx(s); 3697 } 3698 3699 /* 3700 * This function merges stuff from the slave ccb into the master ccb, while 3701 * keeping important fields in the master ccb constant. 
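 *
 * For example (sketch): a peripheral driver handed a master ccb by the
 * XPT can fill out a scratch ccb of its own and then fold it in:
 *
 *	union ccb scratch;
 *
 *	fill in scratch.csio, e.g. via cam_fill_csio();
 *	xpt_merge_ccb(master_ccb, &scratch);
 *
 * after which the payload and the header fields copied below come from
 * scratch while the master's path and queue bookkeeping stay intact.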
3702 */ 3703 void 3704 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3705 { 3706 /* 3707 * Pull fields that are valid for peripheral drivers to set 3708 * into the master CCB along with the CCB "payload". 3709 */ 3710 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3711 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3712 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3713 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3714 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3715 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3716 } 3717 3718 void 3719 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3720 { 3721 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3722 ccb_h->pinfo.priority = priority; 3723 ccb_h->path = path; 3724 ccb_h->path_id = path->bus->path_id; 3725 if (path->target) 3726 ccb_h->target_id = path->target->target_id; 3727 else 3728 ccb_h->target_id = CAM_TARGET_WILDCARD; 3729 if (path->device) { 3730 ccb_h->target_lun = path->device->lun_id; 3731 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3732 } else { 3733 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3734 } 3735 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3736 ccb_h->flags = 0; 3737 } 3738 3739 /* Path manipulation functions */ 3740 cam_status 3741 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3742 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3743 { 3744 struct cam_path *path; 3745 cam_status status; 3746 3747 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT); 3748 3749 if (path == NULL) { 3750 status = CAM_RESRC_UNAVAIL; 3751 return(status); 3752 } 3753 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3754 if (status != CAM_REQ_CMP) { 3755 free(path, M_DEVBUF); 3756 path = NULL; 3757 } 3758 *new_path_ptr = path; 3759 return (status); 3760 } 3761 3762 static cam_status 3763 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3764 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3765 { 3766 struct cam_eb *bus; 3767 struct cam_et *target; 3768 struct cam_ed *device; 3769 cam_status status; 3770 int s; 3771 3772 status = CAM_REQ_CMP; /* Completed without error */ 3773 target = NULL; /* Wildcarded */ 3774 device = NULL; /* Wildcarded */ 3775 3776 /* 3777 * We will potentially modify the EDT, so block interrupts 3778 * that may attempt to create cam paths. 3779 */ 3780 s = splcam(); 3781 bus = xpt_find_bus(path_id); 3782 if (bus == NULL) { 3783 status = CAM_PATH_INVALID; 3784 } else { 3785 target = xpt_find_target(bus, target_id); 3786 if (target == NULL) { 3787 /* Create one */ 3788 struct cam_et *new_target; 3789 3790 new_target = xpt_alloc_target(bus, target_id); 3791 if (new_target == NULL) { 3792 status = CAM_RESRC_UNAVAIL; 3793 } else { 3794 target = new_target; 3795 } 3796 } 3797 if (target != NULL) { 3798 device = xpt_find_device(target, lun_id); 3799 if (device == NULL) { 3800 /* Create one */ 3801 struct cam_ed *new_device; 3802 3803 new_device = xpt_alloc_device(bus, 3804 target, 3805 lun_id); 3806 if (new_device == NULL) { 3807 status = CAM_RESRC_UNAVAIL; 3808 } else { 3809 device = new_device; 3810 } 3811 } 3812 } 3813 } 3814 splx(s); 3815 3816 /* 3817 * Only touch the user's data if we are successful. 
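	 *
	 * (Most consumers reach this code through the malloc'ing wrapper
	 * above; a minimal usage sketch:
	 *
	 *	struct cam_path *path;
	 *
	 *	if (xpt_create_path(&path, NULL, path_id, target_id,
	 *	    lun_id) == CAM_REQ_CMP) {
	 *		use path;
	 *		xpt_free_path(path);
	 *	}
	 *
	 * xpt_free_path() drops the device, target, and bus references
	 * taken here and frees the path storage.)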
3818 */ 3819 if (status == CAM_REQ_CMP) { 3820 new_path->periph = perph; 3821 new_path->bus = bus; 3822 new_path->target = target; 3823 new_path->device = device; 3824 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3825 } else { 3826 if (device != NULL) 3827 xpt_release_device(bus, target, device); 3828 if (target != NULL) 3829 xpt_release_target(bus, target); 3830 if (bus != NULL) 3831 xpt_release_bus(bus); 3832 } 3833 return (status); 3834 } 3835 3836 static void 3837 xpt_release_path(struct cam_path *path) 3838 { 3839 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3840 if (path->device != NULL) { 3841 xpt_release_device(path->bus, path->target, path->device); 3842 path->device = NULL; 3843 } 3844 if (path->target != NULL) { 3845 xpt_release_target(path->bus, path->target); 3846 path->target = NULL; 3847 } 3848 if (path->bus != NULL) { 3849 xpt_release_bus(path->bus); 3850 path->bus = NULL; 3851 } 3852 } 3853 3854 void 3855 xpt_free_path(struct cam_path *path) 3856 { 3857 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3858 xpt_release_path(path); 3859 free(path, M_DEVBUF); 3860 } 3861 3862 3863 /* 3864 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3865 * in path1, 2 for match with wildcards in path2. 3866 */ 3867 int 3868 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3869 { 3870 int retval = 0; 3871 3872 if (path1->bus != path2->bus) { 3873 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3874 retval = 1; 3875 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3876 retval = 2; 3877 else 3878 return (-1); 3879 } 3880 if (path1->target != path2->target) { 3881 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3882 if (retval == 0) 3883 retval = 1; 3884 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3885 retval = 2; 3886 else 3887 return (-1); 3888 } 3889 if (path1->device != path2->device) { 3890 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3891 if (retval == 0) 3892 retval = 1; 3893 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3894 retval = 2; 3895 else 3896 return (-1); 3897 } 3898 return (retval); 3899 } 3900 3901 void 3902 xpt_print_path(struct cam_path *path) 3903 { 3904 if (path == NULL) 3905 printf("(nopath): "); 3906 else { 3907 if (path->periph != NULL) 3908 printf("(%s%d:", path->periph->periph_name, 3909 path->periph->unit_number); 3910 else 3911 printf("(noperiph:"); 3912 3913 if (path->bus != NULL) 3914 printf("%s%d:%d:", path->bus->sim->sim_name, 3915 path->bus->sim->unit_number, 3916 path->bus->sim->bus_id); 3917 else 3918 printf("nobus:"); 3919 3920 if (path->target != NULL) 3921 printf("%d:", path->target->target_id); 3922 else 3923 printf("X:"); 3924 3925 if (path->device != NULL) 3926 printf("%d): ", path->device->lun_id); 3927 else 3928 printf("X): "); 3929 } 3930 } 3931 3932 path_id_t 3933 xpt_path_path_id(struct cam_path *path) 3934 { 3935 return(path->bus->path_id); 3936 } 3937 3938 target_id_t 3939 xpt_path_target_id(struct cam_path *path) 3940 { 3941 if (path->target != NULL) 3942 return (path->target->target_id); 3943 else 3944 return (CAM_TARGET_WILDCARD); 3945 } 3946 3947 lun_id_t 3948 xpt_path_lun_id(struct cam_path *path) 3949 { 3950 if (path->device != NULL) 3951 return (path->device->lun_id); 3952 else 3953 return (CAM_LUN_WILDCARD); 3954 } 3955 3956 struct cam_sim * 3957 xpt_path_sim(struct cam_path *path) 3958 { 3959 return (path->bus->sim); 3960 } 3961 3962 struct cam_periph* 3963 xpt_path_periph(struct cam_path *path) 3964 { 3965 return (path->periph); 
3966 }
3967
3968 /*
3969  * Release a CAM control block for the caller.  Remit the cost of the structure
3970  * to the device referenced by the path.  If this device had no 'credits'
3971  * and peripheral drivers have registered async callbacks for this notification,
3972  * call them now.
3973  */
3974 void
3975 xpt_release_ccb(union ccb *free_ccb)
3976 {
3977 	int s;
3978 	struct cam_path *path;
3979 	struct cam_ed *device;
3980 	struct cam_eb *bus;
3981
3982 	CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3983 	path = free_ccb->ccb_h.path;
3984 	device = path->device;
3985 	bus = path->bus;
3986 	s = splsoftcam();
3987 	cam_ccbq_release_opening(&device->ccbq);
3988 	if (xpt_ccb_count > xpt_max_ccbs) {
3989 		xpt_free_ccb(free_ccb);
3990 		xpt_ccb_count--;
3991 	} else {
3992 		SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle);
3993 	}
3994 	bus->sim->devq->alloc_openings++;
3995 	bus->sim->devq->alloc_active--;
3996 	/* XXX Turn this into an inline function - xpt_run_device?? */
3997 	if ((device_is_alloc_queued(device) == 0)
3998 	 && (device->drvq.entries > 0)) {
3999 		xpt_schedule_dev_allocq(bus, device);
4000 	}
4001 	splx(s);
4002 	if (dev_allocq_is_runnable(bus->sim->devq))
4003 		xpt_run_dev_allocq(bus);
4004 }
4005
4006 /* Functions accessed by SIM drivers */
4007
4008 /*
4009  * A sim structure, listing the SIM entry points and instance
4010  * identification info is passed to xpt_bus_register to hook the SIM
4011  * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4012  * for this new bus and places it in the array of busses and assigns
4013  * it a path_id.  The path_id may be influenced by "hard wiring"
4014  * information specified by the user.  Once interrupt services are
4015  * available, the bus will be probed.
4016  */
4017 int32_t
4018 xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4019 {
4020 	static path_id_t buscount;
4021 	struct cam_eb *new_bus;
4022 	struct ccb_pathinq cpi;
4023 	int s;
4024
4025 	sim->bus_id = bus;
4026 	new_bus = (struct cam_eb *)malloc(sizeof(*new_bus),
4027 					  M_DEVBUF, M_NOWAIT);
4028 	if (new_bus == NULL) {
4029 		/* Couldn't satisfy request */
4030 		return (CAM_RESRC_UNAVAIL);
4031 	}
4032
4033 	bzero(new_bus, sizeof(*new_bus));
4034
4035 	if (strcmp(sim->sim_name, "xpt") != 0) {
4036
4037 		sim->path_id = xptpathid(sim->sim_name, sim->unit_number,
4038 					 sim->bus_id, &buscount);
4039 	}
4040
4041 	new_bus->path_id = sim->path_id;
4042 	new_bus->sim = sim;
4043 	TAILQ_INIT(&new_bus->et_entries);
4044 	timevalclear(&new_bus->last_reset);
4045 	new_bus->refcount = 1;	/* Held until a bus_deregister event */
4046 	s = splcam();
4047 	TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links);
4048 	bus_generation++;
4049 	splx(s);
4050
4051 	/* Notify interested parties */
4052 	if (sim->path_id != CAM_XPT_PATH_ID) {
4053 		struct cam_path path;
4054
4055 		xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4056 				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4057 		xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4058 		cpi.ccb_h.func_code = XPT_PATH_INQ;
4059 		xpt_action((union ccb *)&cpi);
4060 		xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi);
4061 		xpt_release_path(&path);
4062 	}
4063 	return (CAM_SUCCESS);
4064 }
4065
4066 static int
4067 xptnextfreebus(path_id_t startbus)
4068 {
4069 	struct cam_sim_config *sim_conf;
4070
4071 	sim_conf = cam_sinit;
4072 	while (sim_conf->sim_name != NULL) {
4073
4074 		if (IS_SPECIFIED(sim_conf->pathid)
4075 		 && (startbus == sim_conf->pathid)) {
4076 			++startbus;
4077 			/* Start the search over */
4078 			sim_conf = cam_sinit;
4079 		} else {
4080 			sim_conf++;
4081 		}
4082
} 4083 return (startbus); 4084 } 4085 4086 static int 4087 xptpathid(const char *sim_name, int sim_unit, 4088 int sim_bus, path_id_t *nextpath) 4089 { 4090 struct cam_sim_config *sim_conf; 4091 path_id_t pathid; 4092 4093 pathid = CAM_XPT_PATH_ID; 4094 for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) { 4095 4096 if (!IS_SPECIFIED(sim_conf->pathid)) 4097 continue; 4098 4099 if (!strcmp(sim_name, sim_conf->sim_name) 4100 && (sim_unit == sim_conf->sim_unit)) { 4101 4102 if (IS_SPECIFIED(sim_conf->sim_bus)) { 4103 if (sim_bus == sim_conf->sim_bus) { 4104 pathid = sim_conf->pathid; 4105 break; 4106 } 4107 } else if (sim_bus == 0) { 4108 /* Unspecified matches bus 0 */ 4109 pathid = sim_conf->pathid; 4110 break; 4111 } else { 4112 printf("Ambiguous scbus configuration for %s%d " 4113 "bus %d, cannot wire down. The kernel " 4114 "config entry for scbus%d should " 4115 "specify a controller bus.\n" 4116 "Scbus will be assigned dynamically.\n", 4117 sim_name, sim_unit, sim_bus, 4118 sim_conf->pathid); 4119 break; 4120 } 4121 } 4122 } 4123 4124 if (pathid == CAM_XPT_PATH_ID) { 4125 pathid = xptnextfreebus(*nextpath); 4126 *nextpath = pathid + 1; 4127 } 4128 return (pathid); 4129 } 4130 4131 int32_t 4132 xpt_bus_deregister(path_id) 4133 u_int8_t path_id; 4134 { 4135 /* XXX */ 4136 return (CAM_SUCCESS); 4137 } 4138 4139 void 4140 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4141 { 4142 struct cam_eb *bus; 4143 struct cam_et *target, *next_target; 4144 struct cam_ed *device, *next_device; 4145 int s; 4146 4147 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 4148 4149 /* 4150 * Most async events come from a CAM interrupt context. In 4151 * a few cases, the error recovery code at the peripheral layer, 4152 * which may run from our SWI or a process context, may signal 4153 * deferred events with a call to xpt_async. Ensure async 4154 * notifications are serialized by blocking cam interrupts. 4155 */ 4156 s = splcam(); 4157 4158 bus = path->bus; 4159 4160 if (async_code == AC_BUS_RESET) { 4161 int s; 4162 4163 s = splclock(); 4164 /* Update our notion of when the last reset occurred */ 4165 microtime(&bus->last_reset); 4166 splx(s); 4167 } 4168 4169 for (target = TAILQ_FIRST(&bus->et_entries); 4170 target != NULL; 4171 target = next_target) { 4172 4173 next_target = TAILQ_NEXT(target, links); 4174 4175 if (path->target != target 4176 && path->target->target_id != CAM_TARGET_WILDCARD) 4177 continue; 4178 4179 if (async_code == AC_SENT_BDR) { 4180 int s; 4181 4182 /* Update our notion of when the last reset occurred */ 4183 s = splclock(); 4184 microtime(&path->target->last_reset); 4185 splx(s); 4186 } 4187 4188 for (device = TAILQ_FIRST(&target->ed_entries); 4189 device != NULL; 4190 device = next_device) { 4191 cam_status status; 4192 struct cam_path newpath; 4193 4194 next_device = TAILQ_NEXT(device, links); 4195 4196 if (path->device != device 4197 && path->device->lun_id != CAM_LUN_WILDCARD) 4198 continue; 4199 4200 /* 4201 * We need our own path with wildcards expanded to 4202 * handle certain types of events. 4203 */ 4204 if ((async_code == AC_SENT_BDR) 4205 || (async_code == AC_BUS_RESET) 4206 || (async_code == AC_INQ_CHANGED)) 4207 status = xpt_compile_path(&newpath, NULL, 4208 bus->path_id, 4209 target->target_id, 4210 device->lun_id); 4211 else 4212 status = CAM_REQ_CMP_ERR; 4213 4214 if (status == CAM_REQ_CMP) { 4215 4216 /* 4217 * Allow transfer negotiation to occur in a 4218 * tag free environment. 
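 * (A bus or target reset returns the target to asynchronous, untagged
 * operation, so xpt_toggle_tags below briefly turns tagged queuing off
 * and back on, giving the controller a window to renegotiate.)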
4219 */ 4220 if (async_code == AC_SENT_BDR 4221 || async_code == AC_BUS_RESET) 4222 xpt_toggle_tags(&newpath); 4223 4224 if (async_code == AC_INQ_CHANGED) { 4225 /* 4226 * We've sent a start unit command, or 4227 * something similar to a device that 4228 * may have caused its inquiry data to 4229 * change. So we re-scan the device to 4230 * refresh the inquiry data for it. 4231 */ 4232 xpt_scan_lun(newpath.periph, &newpath, 4233 CAM_EXPECT_INQ_CHANGE, 4234 NULL); 4235 } 4236 xpt_release_path(&newpath); 4237 } else if (async_code == AC_LOST_DEVICE) { 4238 device->flags |= CAM_DEV_UNCONFIGURED; 4239 } else if (async_code == AC_TRANSFER_NEG) { 4240 struct ccb_trans_settings *settings; 4241 4242 settings = 4243 (struct ccb_trans_settings *)async_arg; 4244 xpt_set_transfer_settings(settings, device, 4245 /*async_update*/TRUE); 4246 } 4247 4248 xpt_async_bcast(&device->asyncs, 4249 async_code, 4250 path, 4251 async_arg); 4252 } 4253 } 4254 4255 /* 4256 * If this wasn't a fully wildcarded async, tell all 4257 * clients that want all async events. 4258 */ 4259 if (bus != xpt_periph->path->bus) 4260 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, 4261 path, async_arg); 4262 splx(s); 4263 } 4264 4265 static void 4266 xpt_async_bcast(struct async_list *async_head, 4267 u_int32_t async_code, 4268 struct cam_path *path, void *async_arg) 4269 { 4270 struct async_node *cur_entry; 4271 4272 cur_entry = SLIST_FIRST(async_head); 4273 while (cur_entry != NULL) { 4274 struct async_node *next_entry; 4275 /* 4276 * Grab the next list entry before we call the current 4277 * entry's callback. This is because the callback function 4278 * can delete its async callback entry. 4279 */ 4280 next_entry = SLIST_NEXT(cur_entry, links); 4281 if ((cur_entry->event_enable & async_code) != 0) 4282 cur_entry->callback(cur_entry->callback_arg, 4283 async_code, path, 4284 async_arg); 4285 cur_entry = next_entry; 4286 } 4287 } 4288 4289 u_int32_t 4290 xpt_freeze_devq(struct cam_path *path, u_int count) 4291 { 4292 int s; 4293 struct ccb_hdr *ccbh; 4294 4295 s = splcam(); 4296 path->device->qfrozen_cnt += count; 4297 4298 /* 4299 * Mark the last CCB in the queue as needing 4300 * to be requeued if the driver hasn't 4301 * changed its state yet. This fixes a race 4302 * where a ccb is just about to be queued to 4303 * a controller driver when its interrupt routine 4304 * freezes the queue. To completely close the 4305 * hole, controller drivers must check to see 4306 * if a ccb's status is still CAM_REQ_INPROG 4307 * under spl protection just before they queue 4308 * the CCB. See ahc_action/ahc_freeze_devq for 4309 * an example.
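 *
 * A minimal sketch of the corresponding check on the SIM side
 * (illustrative only; example_queue_to_hardware is a hypothetical
 * helper standing in for the driver's real queuing code):
 *
 *	s = splcam();
 *	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *		splx(s);
 *		xpt_done(ccb);
 *		return;
 *	}
 *	example_queue_to_hardware(ccb);
 *	splx(s);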
4310 */ 4311 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq); 4312 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4313 ccbh->status = CAM_REQUEUE_REQ; 4314 splx(s); 4315 return (path->device->qfrozen_cnt); 4316 } 4317 4318 u_int32_t 4319 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4320 { 4321 sim->devq->send_queue.qfrozen_cnt += count; 4322 if (sim->devq->active_dev != NULL) { 4323 struct ccb_hdr *ccbh; 4324 4325 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs, 4326 ccb_hdr_tailq); 4327 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4328 ccbh->status = CAM_REQUEUE_REQ; 4329 } 4330 return (sim->devq->send_queue.qfrozen_cnt); 4331 } 4332 4333 static void 4334 xpt_release_devq_timeout(void *arg) 4335 { 4336 struct cam_ed *device; 4337 4338 device = (struct cam_ed *)arg; 4339 4340 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE); 4341 } 4342 4343 void 4344 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4345 { 4346 xpt_release_devq_device(path->device, count, run_queue); 4347 } 4348 4349 static void 4350 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4351 { 4352 int rundevq; 4353 int s0, s1; 4354 4355 rundevq = 0; 4356 s0 = splsoftcam(); 4357 s1 = splcam(); 4358 if (dev->qfrozen_cnt > 0) { 4359 4360 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count; 4361 dev->qfrozen_cnt -= count; 4362 if (dev->qfrozen_cnt == 0) { 4363 4364 /* 4365 * No longer need to wait for a successful 4366 * command completion. 4367 */ 4368 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4369 4370 /* 4371 * Remove any timeouts that might be scheduled 4372 * to release this queue. 4373 */ 4374 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4375 untimeout(xpt_release_devq_timeout, dev, 4376 dev->c_handle); 4377 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4378 } 4379 4380 /* 4381 * Now that we are unfrozen, schedule the 4382 * device so any pending transactions are 4383 * run. 4384 */ 4385 if ((dev->ccbq.queue.entries > 0) 4386 && (xpt_schedule_dev_sendq(dev->target->bus, dev)) 4387 && (run_queue != 0)) { 4388 rundevq = 1; 4389 } 4390 } 4391 } 4392 splx(s1); 4393 if (rundevq != 0) 4394 xpt_run_dev_sendq(dev->target->bus); 4395 splx(s0); 4396 } 4397 4398 void 4399 xpt_release_simq(struct cam_sim *sim, int run_queue) 4400 { 4401 int s; 4402 struct camq *sendq; 4403 4404 sendq = &(sim->devq->send_queue); 4405 s = splcam(); 4406 if (sendq->qfrozen_cnt > 0) { 4407 4408 sendq->qfrozen_cnt--; 4409 if (sendq->qfrozen_cnt == 0) { 4410 struct cam_eb *bus; 4411 4412 /* 4413 * If there is a timeout scheduled to release this 4414 * sim queue, remove it. The queue frozen count is 4415 * already at 0. 4416 */ 4417 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4418 untimeout(xpt_release_simq_timeout, sim, 4419 sim->c_handle); 4420 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4421 } 4422 bus = xpt_find_bus(sim->path_id); 4423 splx(s); 4424 4425 if (run_queue) { 4426 /* 4427 * Now that we are unfrozen, run the send queue.
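 *
 * (Typical pairing, for illustration: a SIM that runs short of
 * controller resources can freeze its queue from its interrupt
 * handler with xpt_freeze_simq(sim, 1) and, once the shortage
 * clears, call xpt_release_simq(sim, TRUE) to land here and
 * restart I/O.)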
4428 */ 4429 xpt_run_dev_sendq(bus); 4430 } 4431 xpt_release_bus(bus); 4432 } else 4433 splx(s); 4434 } else 4435 splx(s); 4436 } 4437 4438 static void 4439 xpt_release_simq_timeout(void *arg) 4440 { 4441 struct cam_sim *sim; 4442 4443 sim = (struct cam_sim *)arg; 4444 xpt_release_simq(sim, /* run_queue */ TRUE); 4445 } 4446 4447 void 4448 xpt_done(union ccb *done_ccb) 4449 { 4450 int s; 4451 4452 s = splcam(); 4453 4454 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); 4455 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { 4456 /* 4457 * Queue up any of the "non-immediate" type of ccbs for 4458 * handling by our SWI handler. 4459 */ 4460 switch (done_ccb->ccb_h.path->periph->type) { 4461 case CAM_PERIPH_BIO: 4462 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h, 4463 sim_links.tqe); 4464 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4465 setsoftcambio(); 4466 break; 4467 case CAM_PERIPH_NET: 4468 TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h, 4469 sim_links.tqe); 4470 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4471 setsoftcamnet(); 4472 break; 4473 } 4474 } 4475 splx(s); 4476 } 4477 4478 union ccb * 4479 xpt_alloc_ccb() 4480 { 4481 union ccb *new_ccb; 4482 4483 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK); 4484 return (new_ccb); 4485 } 4486 4487 void 4488 xpt_free_ccb(union ccb *free_ccb) 4489 { 4490 free(free_ccb, M_DEVBUF); 4491 } 4492 4493 4494 4495 /* Private XPT functions */ 4496 4497 /* 4498 * Get a CAM control block for the caller. Charge the structure to the device 4499 * referenced by the path. If this device has no 'credits' then the 4500 * device already has the maximum number of outstanding operations under way 4501 * and we return NULL. If we don't have sufficient resources to allocate more 4502 * ccbs, we also return NULL. 4503 */ 4504 static union ccb * 4505 xpt_get_ccb(struct cam_ed *device) 4506 { 4507 union ccb *new_ccb; 4508 int s; 4509 4510 s = splsoftcam(); 4511 if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) { 4512 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT); 4513 if (new_ccb == NULL) { 4514 splx(s); 4515 return (NULL); 4516 } 4517 callout_handle_init(&new_ccb->ccb_h.timeout_ch); 4518 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h, 4519 xpt_links.sle); 4520 xpt_ccb_count++; 4521 } 4522 cam_ccbq_take_opening(&device->ccbq); 4523 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle); 4524 splx(s); 4525 return (new_ccb); 4526 } 4527 4528 static void 4529 xpt_release_bus(struct cam_eb *bus) 4530 { 4531 int s; 4532 4533 s = splcam(); 4534 if ((--bus->refcount == 0) 4535 && (TAILQ_FIRST(&bus->et_entries) == NULL)) { 4536 TAILQ_REMOVE(&xpt_busses, bus, links); 4537 bus_generation++; 4538 splx(s); 4539 free(bus, M_DEVBUF); 4540 } else 4541 splx(s); 4542 } 4543 4544 static struct cam_et * 4545 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4546 { 4547 struct cam_et *target; 4548 4549 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT); 4550 if (target != NULL) { 4551 struct cam_et *cur_target; 4552 4553 target->bus = bus; 4554 target->target_id = target_id; 4555 target->refcount = 1; 4556 /* 4557 * Hold a reference to our parent bus so it 4558 * will not go away before we do.
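 * The reference taken below is returned by xpt_release_target, which
 * calls xpt_release_bus when the target itself is freed.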
4559 */ 4560 bus->refcount++; 4561 TAILQ_INIT(&target->ed_entries); 4562 timevalclear(&target->last_reset); 4563 4564 /* Insertion sort into our bus's target list */ 4565 cur_target = TAILQ_FIRST(&bus->et_entries); 4566 while (cur_target != NULL && cur_target->target_id < target_id) 4567 cur_target = TAILQ_NEXT(cur_target, links); 4568 4569 if (cur_target != NULL) { 4570 TAILQ_INSERT_BEFORE(cur_target, target, links); 4571 } else { 4572 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4573 } 4574 bus->generation++; 4575 } 4576 return (target); 4577 } 4578 4579 static void 4580 xpt_release_target(struct cam_eb *bus, struct cam_et *target) 4581 { 4582 int s; 4583 4584 s = splcam(); 4585 if ((--target->refcount == 0) 4586 && (TAILQ_FIRST(&target->ed_entries) == NULL)) { 4587 TAILQ_REMOVE(&bus->et_entries, target, links); 4588 bus->generation++; 4589 splx(s); 4590 free(target, M_DEVBUF); 4591 xpt_release_bus(bus); 4592 } else 4593 splx(s); 4594 } 4595 4596 static struct cam_ed * 4597 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4598 { 4599 struct cam_ed *device; 4600 struct cam_devq *devq; 4601 cam_status status; 4602 4603 /* Make space for us in the device queue on our bus */ 4604 devq = bus->sim->devq; 4605 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); 4606 4607 if (status != CAM_REQ_CMP) { 4608 device = NULL; 4609 } else { 4610 device = (struct cam_ed *)malloc(sizeof(*device), 4611 M_DEVBUF, M_NOWAIT); 4612 } 4613 4614 if (device != NULL) { 4615 struct cam_ed *cur_device; 4616 4617 bzero(device, sizeof(*device)); 4618 4619 SLIST_INIT(&device->asyncs); 4620 SLIST_INIT(&device->periphs); 4621 callout_handle_init(&device->c_handle); 4622 device->refcount = 1; 4623 device->flags |= CAM_DEV_UNCONFIGURED; 4624 /* 4625 * Take the default quirk entry until we have inquiry 4626 * data and can determine a better quirk to use. 4627 */ 4628 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1]; 4629 4630 cam_init_pinfo(&device->alloc_ccb_entry.pinfo); 4631 device->alloc_ccb_entry.device = device; 4632 cam_init_pinfo(&device->send_ccb_entry.pinfo); 4633 device->send_ccb_entry.device = device; 4634 4635 device->target = target; 4636 /* 4637 * Hold a reference to our parent target so it 4638 * will not go away before we do. 4639 */ 4640 target->refcount++; 4641 4642 device->lun_id = lun_id; 4643 4644 /* Initialize our queues */ 4645 if (camq_init(&device->drvq, 0) != 0) { 4646 free(device, M_DEVBUF); 4647 return (NULL); 4648 } 4649 4650 if (cam_ccbq_init(&device->ccbq, 4651 bus->sim->max_dev_openings) != 0) { 4652 camq_fini(&device->drvq); 4653 free(device, M_DEVBUF); 4654 return (NULL); 4655 } 4656 /* 4657 * XXX should be limited by number of CCBs this bus can 4658 * do. 
4659 */ 4660 xpt_max_ccbs += device->ccbq.devq_openings; 4661 /* Insertion sort into our target's device list */ 4662 cur_device = TAILQ_FIRST(&target->ed_entries); 4663 while (cur_device != NULL && cur_device->lun_id < lun_id) 4664 cur_device = TAILQ_NEXT(cur_device, links); 4665 if (cur_device != NULL) { 4666 TAILQ_INSERT_BEFORE(cur_device, device, links); 4667 } else { 4668 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4669 } 4670 target->generation++; 4671 } 4672 return (device); 4673 } 4674 4675 static void 4676 xpt_release_device(struct cam_eb *bus, struct cam_et *target, 4677 struct cam_ed *device) 4678 { 4679 int s; 4680 4681 s = splcam(); 4682 if ((--device->refcount == 0) 4683 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) { 4684 struct cam_devq *devq; 4685 4686 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX 4687 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) 4688 panic("Removing device while still queued for ccbs"); 4689 4690 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4691 untimeout(xpt_release_devq_timeout, device, 4692 device->c_handle); 4693 4694 TAILQ_REMOVE(&target->ed_entries, device, links); 4695 target->generation++; 4696 xpt_max_ccbs -= device->ccbq.devq_openings; 4697 /* Release our slot in the devq */ 4698 devq = bus->sim->devq; 4699 cam_devq_resize(devq, devq->alloc_queue.array_size - 1); 4700 splx(s); 4701 free(device, M_DEVBUF); 4702 } else 4703 splx(s); 4704 } 4705 4706 static u_int32_t 4707 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4708 { 4709 int s; 4710 int diff; 4711 int result; 4712 struct cam_ed *dev; 4713 4714 dev = path->device; 4715 s = splsoftcam(); 4716 4717 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 4718 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4719 if (result == CAM_REQ_CMP && (diff < 0)) { 4720 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 4721 } 4722 /* Adjust the global limit */ 4723 xpt_max_ccbs += diff; 4724 splx(s); 4725 return (result); 4726 } 4727 4728 static struct cam_eb * 4729 xpt_find_bus(path_id_t path_id) 4730 { 4731 struct cam_eb *bus; 4732 4733 for (bus = TAILQ_FIRST(&xpt_busses); 4734 bus != NULL; 4735 bus = TAILQ_NEXT(bus, links)) { 4736 if (bus->path_id == path_id) { 4737 bus->refcount++; 4738 break; 4739 } 4740 } 4741 return (bus); 4742 } 4743 4744 static struct cam_et * 4745 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4746 { 4747 struct cam_et *target; 4748 4749 for (target = TAILQ_FIRST(&bus->et_entries); 4750 target != NULL; 4751 target = TAILQ_NEXT(target, links)) { 4752 if (target->target_id == target_id) { 4753 target->refcount++; 4754 break; 4755 } 4756 } 4757 return (target); 4758 } 4759 4760 static struct cam_ed * 4761 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4762 { 4763 struct cam_ed *device; 4764 4765 for (device = TAILQ_FIRST(&target->ed_entries); 4766 device != NULL; 4767 device = TAILQ_NEXT(device, links)) { 4768 if (device->lun_id == lun_id) { 4769 device->refcount++; 4770 break; 4771 } 4772 } 4773 return (device); 4774 } 4775 4776 typedef struct { 4777 union ccb *request_ccb; 4778 struct ccb_pathinq *cpi; 4779 int pending_count; 4780 } xpt_scan_bus_info; 4781 4782 /* 4783 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 4784 * As the scan progresses, xpt_scan_bus is used as the 4785 * completion callback function.
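 *
 * The flow, in brief: the XPT_SCAN_BUS case issues one XPT_SCAN_LUN
 * CCB per possible target; each completion re-enters this function
 * through the XPT_SCAN_LUN case, which walks that target's luns and
 * finally decrements pending_count. When pending_count reaches zero
 * the original request_ccb is completed with xpt_done.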
4786 */ 4787 static void 4788 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 4789 { 4790 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4791 ("xpt_scan_bus\n")); 4792 switch (request_ccb->ccb_h.func_code) { 4793 case XPT_SCAN_BUS: 4794 { 4795 xpt_scan_bus_info *scan_info; 4796 union ccb *work_ccb; 4797 struct cam_path *path; 4798 u_int i; 4799 u_int max_target; 4800 u_int initiator_id; 4801 4802 /* Find out the characteristics of the bus */ 4803 work_ccb = xpt_alloc_ccb(); 4804 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 4805 request_ccb->ccb_h.pinfo.priority); 4806 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 4807 xpt_action(work_ccb); 4808 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 4809 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 4810 xpt_free_ccb(work_ccb); 4811 xpt_done(request_ccb); 4812 return; 4813 } 4814 4815 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 4816 /* 4817 * Can't scan the bus on an adapter that 4818 * cannot perform the initiator role. 4819 */ 4820 request_ccb->ccb_h.status = CAM_REQ_CMP; 4821 xpt_free_ccb(work_ccb); 4822 xpt_done(request_ccb); 4823 return; 4824 } 4825 4826 /* Save some state for use while we probe for devices */ 4827 scan_info = (xpt_scan_bus_info *) 4828 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 4829 scan_info->request_ccb = request_ccb; 4830 scan_info->cpi = &work_ccb->cpi; 4831 4832 /* Cache on our stack so we can work asynchronously */ 4833 max_target = scan_info->cpi->max_target; 4834 initiator_id = scan_info->cpi->initiator_id; 4835 4836 /* 4837 * Don't count the initiator if the 4838 * initiator is addressable. 4839 */ 4840 scan_info->pending_count = max_target + 1; 4841 if (initiator_id <= max_target) 4842 scan_info->pending_count--; 4843 4844 for (i = 0; i <= max_target; i++) { 4845 cam_status status; 4846 if (i == initiator_id) 4847 continue; 4848 4849 status = xpt_create_path(&path, xpt_periph, 4850 request_ccb->ccb_h.path_id, 4851 i, 0); 4852 if (status != CAM_REQ_CMP) { 4853 printf("xpt_scan_bus: xpt_create_path failed" 4854 " with status %#x, bus scan halted\n", 4855 status); 4856 break; 4857 } 4858 work_ccb = xpt_alloc_ccb(); 4859 xpt_setup_ccb(&work_ccb->ccb_h, path, 4860 request_ccb->ccb_h.pinfo.priority); 4861 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4862 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4863 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 4864 work_ccb->crcn.flags = request_ccb->crcn.flags; 4865 #if 0 4866 printf("xpt_scan_bus: probing %d:%d:%d\n", 4867 request_ccb->ccb_h.path_id, i, 0); 4868 #endif 4869 xpt_action(work_ccb); 4870 } 4871 break; 4872 } 4873 case XPT_SCAN_LUN: 4874 { 4875 xpt_scan_bus_info *scan_info; 4876 path_id_t path_id; 4877 target_id_t target_id; 4878 lun_id_t lun_id; 4879 4880 /* Reuse the same CCB to query if a device was really found */ 4881 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 4882 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 4883 request_ccb->ccb_h.pinfo.priority); 4884 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 4885 4886 path_id = request_ccb->ccb_h.path_id; 4887 target_id = request_ccb->ccb_h.target_id; 4888 lun_id = request_ccb->ccb_h.target_lun; 4889 xpt_action(request_ccb); 4890 4891 #if 0 4892 printf("xpt_scan_bus: got back probe from %d:%d:%d\n", 4893 path_id, target_id, lun_id); 4894 #endif 4895 4896 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 4897 struct cam_ed *device; 4898 struct cam_et *target; 4899 int s, phl; 4900 4901 /* 4902 * If we already probed lun 0 successfully, or 4903 * we have 
additional configured luns on this 4904 * target that might have "gone away", go on to 4905 * the next lun. 4906 */ 4907 target = request_ccb->ccb_h.path->target; 4908 /* 4909 * We may touch devices that we don't 4910 * hold references to, so ensure they 4911 * don't disappear out from under us. 4912 * The target above is referenced by the 4913 * path in the request ccb. 4914 */ 4915 phl = 0; 4916 s = splcam(); 4917 device = TAILQ_FIRST(&target->ed_entries); 4918 if (device != NULL) { 4919 phl = device->quirk->quirks & CAM_QUIRK_HILUNS; 4920 if (device->lun_id == 0) 4921 device = TAILQ_NEXT(device, links); 4922 } 4923 splx(s); 4924 if ((lun_id != 0) || (device != NULL)) { 4925 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl) 4926 lun_id++; 4927 } 4928 } else { 4929 struct cam_ed *device; 4930 4931 device = request_ccb->ccb_h.path->device; 4932 4933 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 4934 /* Try the next lun */ 4935 if (lun_id < (CAM_SCSI2_MAXLUN-1) || 4936 (device->quirk->quirks & CAM_QUIRK_HILUNS)) 4937 lun_id++; 4938 } 4939 } 4940 4941 xpt_free_path(request_ccb->ccb_h.path); 4942 4943 /* Check Bounds */ 4944 if ((lun_id == request_ccb->ccb_h.target_lun) 4945 || lun_id > scan_info->cpi->max_lun) { 4946 /* We're done */ 4947 4948 xpt_free_ccb(request_ccb); 4949 scan_info->pending_count--; 4950 if (scan_info->pending_count == 0) { 4951 xpt_free_ccb((union ccb *)scan_info->cpi); 4952 request_ccb = scan_info->request_ccb; 4953 free(scan_info, M_TEMP); 4954 request_ccb->ccb_h.status = CAM_REQ_CMP; 4955 xpt_done(request_ccb); 4956 } 4957 } else { 4958 /* Try the next device */ 4959 struct cam_path *path; 4960 cam_status status; 4961 4962 path = request_ccb->ccb_h.path; 4963 status = xpt_create_path(&path, xpt_periph, 4964 path_id, target_id, lun_id); 4965 if (status != CAM_REQ_CMP) { 4966 printf("xpt_scan_bus: xpt_create_path failed " 4967 "with status %#x, halting LUN scan\n", 4968 status); 4969 xpt_free_ccb(request_ccb); 4970 scan_info->pending_count--; 4971 if (scan_info->pending_count == 0) { 4972 xpt_free_ccb( 4973 (union ccb *)scan_info->cpi); 4974 request_ccb = scan_info->request_ccb; 4975 free(scan_info, M_TEMP); 4976 request_ccb->ccb_h.status = CAM_REQ_CMP; 4977 xpt_done(request_ccb); 4978 break; 4979 } 4980 } 4981 xpt_setup_ccb(&request_ccb->ccb_h, path, 4982 request_ccb->ccb_h.pinfo.priority); 4983 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4984 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4985 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 4986 request_ccb->crcn.flags = 4987 scan_info->request_ccb->crcn.flags; 4988 #if 0 4989 xpt_print_path(path); 4990 printf("xpt_scan bus probing\n"); 4991 #endif 4992 xpt_action(request_ccb); 4993 } 4994 break; 4995 } 4996 default: 4997 break; 4998 } 4999 } 5000 5001 typedef enum { 5002 PROBE_TUR, 5003 PROBE_INQUIRY, 5004 PROBE_MODE_SENSE, 5005 PROBE_SERIAL_NUM, 5006 PROBE_TUR_FOR_NEGOTIATION 5007 } probe_action; 5008 5009 typedef enum { 5010 PROBE_INQUIRY_CKSUM = 0x01, 5011 PROBE_SERIAL_CKSUM = 0x02, 5012 PROBE_NO_ANNOUNCE = 0x04 5013 } probe_flags; 5014 5015 typedef struct { 5016 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5017 probe_action action; 5018 union ccb saved_ccb; 5019 probe_flags flags; 5020 MD5_CTX context; 5021 u_int8_t digest[16]; 5022 } probe_softc; 5023 5024 static void 5025 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5026 cam_flags flags, union ccb *request_ccb) 5027 { 5028 struct ccb_pathinq cpi; 5029 cam_status status; 5030 struct cam_path *new_path; 5031 struct cam_periph *old_periph; 5032 int s; 5033 5034
CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5035 ("xpt_scan_lun\n")); 5036 5037 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5038 cpi.ccb_h.func_code = XPT_PATH_INQ; 5039 xpt_action((union ccb *)&cpi); 5040 5041 if (cpi.ccb_h.status != CAM_REQ_CMP) { 5042 if (request_ccb != NULL) { 5043 request_ccb->ccb_h.status = cpi.ccb_h.status; 5044 xpt_done(request_ccb); 5045 } 5046 return; 5047 } 5048 5049 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5050 /* 5051 * Can't scan the bus on an adapter that 5052 * cannot perform the initiator role. 5053 */ 5054 if (request_ccb != NULL) { 5055 request_ccb->ccb_h.status = CAM_REQ_CMP; 5056 xpt_done(request_ccb); 5057 } 5058 return; 5059 } 5060 5061 if (request_ccb == NULL) { 5062 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5063 if (request_ccb == NULL) { 5064 xpt_print_path(path); 5065 printf("xpt_scan_lun: can't allocate CCB, can't " 5066 "continue\n"); 5067 return; 5068 } 5069 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5070 if (new_path == NULL) { 5071 xpt_print_path(path); 5072 printf("xpt_scan_lun: can't allocate path, can't " 5073 "continue\n"); 5074 free(request_ccb, M_TEMP); 5075 return; 5076 } 5077 status = xpt_compile_path(new_path, xpt_periph, 5078 path->bus->path_id, 5079 path->target->target_id, 5080 path->device->lun_id); 5081 5082 if (status != CAM_REQ_CMP) { 5083 xpt_print_path(path); 5084 printf("xpt_scan_lun: can't compile path, can't " 5085 "continue\n"); 5086 free(request_ccb, M_TEMP); 5087 free(new_path, M_TEMP); 5088 return; 5089 } 5090 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5091 request_ccb->ccb_h.cbfcnp = xptscandone; 5092 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5093 request_ccb->crcn.flags = flags; 5094 } 5095 5096 s = splsoftcam(); 5097 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5098 probe_softc *softc; 5099 5100 softc = (probe_softc *)old_periph->softc; 5101 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5102 periph_links.tqe); 5103 } else { 5104 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5105 probestart, "probe", 5106 CAM_PERIPH_BIO, 5107 request_ccb->ccb_h.path, NULL, 0, 5108 request_ccb); 5109 5110 if (status != CAM_REQ_CMP) { 5111 xpt_print_path(path); 5112 printf("xpt_scan_lun: cam_periph_alloc returned an " 5113 "error, can't continue probe\n"); 5114 request_ccb->ccb_h.status = status; 5115 xpt_done(request_ccb); 5116 } 5117 } 5118 splx(s); 5119 } 5120 5121 static void 5122 xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5123 { 5124 xpt_release_path(done_ccb->ccb_h.path); 5125 free(done_ccb->ccb_h.path, M_TEMP); 5126 free(done_ccb, M_TEMP); 5127 } 5128 5129 static cam_status 5130 proberegister(struct cam_periph *periph, void *arg) 5131 { 5132 union ccb *request_ccb; /* CCB representing the probe request */ 5133 probe_softc *softc; 5134 5135 request_ccb = (union ccb *)arg; 5136 if (periph == NULL) { 5137 printf("proberegister: periph was NULL!!\n"); 5138 return(CAM_REQ_CMP_ERR); 5139 } 5140 5141 if (request_ccb == NULL) { 5142 printf("proberegister: no probe CCB, can't register device\n"); 5143 return(CAM_REQ_CMP_ERR); 5144 } 5145 5146 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5147 5148 if (softc == NULL) { 5149 printf("proberegister: Unable to probe new device. " 5150 "Unable to allocate softc\n"); 5151 return(CAM_REQ_CMP_ERR); 5152 } 5153 TAILQ_INIT(&softc->request_ccbs); 5154 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5155 periph_links.tqe); 5156 softc->flags = 0; 5157 periph->softc = softc; 5158 cam_periph_acquire(periph); 5159 /* 5160 * Ensure we've waited at least a bus settle 5161 * delay before attempting to probe the device. 5162 * For HBAs that don't do bus resets, this won't make a difference. 5163 */ 5164 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, 5165 SCSI_DELAY); 5166 probeschedule(periph); 5167 return(CAM_REQ_CMP); 5168 } 5169 5170 static void 5171 probeschedule(struct cam_periph *periph) 5172 { 5173 struct ccb_pathinq cpi; 5174 union ccb *ccb; 5175 probe_softc *softc; 5176 5177 softc = (probe_softc *)periph->softc; 5178 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5179 5180 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1); 5181 cpi.ccb_h.func_code = XPT_PATH_INQ; 5182 xpt_action((union ccb *)&cpi); 5183 5184 /* 5185 * If a device has gone away and another device, or the same one, 5186 * is back in the same place, it should have a unit attention 5187 * condition pending. It will not report the unit attention in 5188 * response to an inquiry, which may leave invalid transfer 5189 * negotiations in effect. The TUR will reveal the unit attention 5190 * condition. Only send the TUR for lun 0, since some devices 5191 * will get confused by commands other than inquiry to non-existent 5192 * luns. If you think a device has gone away, start your scan from 5193 * lun 0. This will ensure that any bogus transfer settings are 5194 * invalidated. 5195 * 5196 * If we haven't seen the device before and the controller supports 5197 * some kind of transfer negotiation, negotiate with the first 5198 * sent command if no bus reset was performed at startup. This 5199 * ensures that the device is not confused by transfer negotiation 5200 * settings left over by loader or BIOS action.
5201 */ 5202 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5203 && (ccb->ccb_h.target_lun == 0)) { 5204 softc->action = PROBE_TUR; 5205 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 5206 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { 5207 proberequestdefaultnegotiation(periph); 5208 softc->action = PROBE_INQUIRY; 5209 } else { 5210 softc->action = PROBE_INQUIRY; 5211 } 5212 5213 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) 5214 softc->flags |= PROBE_NO_ANNOUNCE; 5215 else 5216 softc->flags &= ~PROBE_NO_ANNOUNCE; 5217 5218 xpt_schedule(periph, ccb->ccb_h.pinfo.priority); 5219 } 5220 5221 static void 5222 probestart(struct cam_periph *periph, union ccb *start_ccb) 5223 { 5224 /* Probe the device that our peripheral driver points to */ 5225 struct ccb_scsiio *csio; 5226 probe_softc *softc; 5227 5228 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); 5229 5230 softc = (probe_softc *)periph->softc; 5231 csio = &start_ccb->csio; 5232 5233 switch (softc->action) { 5234 case PROBE_TUR: 5235 case PROBE_TUR_FOR_NEGOTIATION: 5236 { 5237 scsi_test_unit_ready(csio, 5238 /*retries*/4, 5239 probedone, 5240 MSG_SIMPLE_Q_TAG, 5241 SSD_FULL_SIZE, 5242 /*timeout*/60000); 5243 break; 5244 } 5245 case PROBE_INQUIRY: 5246 { 5247 struct scsi_inquiry_data *inq_buf; 5248 5249 inq_buf = &periph->path->device->inq_data; 5250 /* 5251 * If the device is currently configured, we calculate an 5252 * MD5 checksum of the inquiry data, and if the serial number 5253 * length is greater than 0, add the serial number data 5254 * into the checksum as well. Once the inquiry and the 5255 * serial number check finish, we attempt to figure out 5256 * whether we still have the same device. 5257 */ 5258 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 5259 5260 MD5Init(&softc->context); 5261 MD5Update(&softc->context, (unsigned char *)inq_buf, 5262 sizeof(struct scsi_inquiry_data)); 5263 softc->flags |= PROBE_INQUIRY_CKSUM; 5264 if (periph->path->device->serial_num_len > 0) { 5265 MD5Update(&softc->context, 5266 periph->path->device->serial_num, 5267 periph->path->device->serial_num_len); 5268 softc->flags |= PROBE_SERIAL_CKSUM; 5269 } 5270 MD5Final(softc->digest, &softc->context); 5271 } 5272 5273 scsi_inquiry(csio, 5274 /*retries*/4, 5275 probedone, 5276 MSG_SIMPLE_Q_TAG, 5277 (u_int8_t *)inq_buf, 5278 sizeof(*inq_buf), 5279 /*evpd*/FALSE, 5280 /*page_code*/0, 5281 SSD_MIN_SIZE, 5282 /*timeout*/60 * 1000); 5283 break; 5284 } 5285 case PROBE_MODE_SENSE: 5286 { 5287 void *mode_buf; 5288 int mode_buf_len; 5289 5290 mode_buf_len = sizeof(struct scsi_mode_header_6) 5291 + sizeof(struct scsi_mode_blk_desc) 5292 + sizeof(struct scsi_control_page); 5293 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT); 5294 if (mode_buf != NULL) { 5295 scsi_mode_sense(csio, 5296 /*retries*/4, 5297 probedone, 5298 MSG_SIMPLE_Q_TAG, 5299 /*dbd*/FALSE, 5300 SMS_PAGE_CTRL_CURRENT, 5301 SMS_CONTROL_MODE_PAGE, 5302 mode_buf, 5303 mode_buf_len, 5304 SSD_FULL_SIZE, 5305 /*timeout*/60000); 5306 break; 5307 } 5308 xpt_print_path(periph->path); 5309 printf("Unable to mode sense control page - malloc failure\n"); 5310 softc->action = PROBE_SERIAL_NUM; 5311 /* FALLTHROUGH */ 5312 } 5313 case PROBE_SERIAL_NUM: 5314 { 5315 struct scsi_vpd_unit_serial_number *serial_buf; 5316 struct cam_ed* device; 5317 5318 serial_buf = NULL; 5319 device = periph->path->device; 5320 device->serial_num = NULL; 5321 device->serial_num_len = 0; 5322 5323 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) 
5324 serial_buf = (struct scsi_vpd_unit_serial_number *) 5325 malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT); 5326 5327 if (serial_buf != NULL) { 5328 bzero(serial_buf, sizeof(*serial_buf)); 5329 scsi_inquiry(csio, 5330 /*retries*/4, 5331 probedone, 5332 MSG_SIMPLE_Q_TAG, 5333 (u_int8_t *)serial_buf, 5334 sizeof(*serial_buf), 5335 /*evpd*/TRUE, 5336 SVPD_UNIT_SERIAL_NUMBER, 5337 SSD_MIN_SIZE, 5338 /*timeout*/60 * 1000); 5339 break; 5340 } 5341 /* 5342 * We'll have to do without, let our probedone 5343 * routine finish up for us. 5344 */ 5345 start_ccb->csio.data_ptr = NULL; 5346 probedone(periph, start_ccb); 5347 return; 5348 } 5349 } 5350 xpt_action(start_ccb); 5351 } 5352 5353 static void 5354 proberequestdefaultnegotiation(struct cam_periph *periph) 5355 { 5356 struct ccb_trans_settings cts; 5357 5358 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 5359 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5360 cts.flags = CCB_TRANS_USER_SETTINGS; 5361 xpt_action((union ccb *)&cts); 5362 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 5363 cts.flags &= ~CCB_TRANS_USER_SETTINGS; 5364 cts.flags |= CCB_TRANS_CURRENT_SETTINGS; 5365 xpt_action((union ccb *)&cts); 5366 } 5367 5368 static void 5369 probedone(struct cam_periph *periph, union ccb *done_ccb) 5370 { 5371 probe_softc *softc; 5372 struct cam_path *path; 5373 u_int32_t priority; 5374 5375 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 5376 5377 softc = (probe_softc *)periph->softc; 5378 path = done_ccb->ccb_h.path; 5379 priority = done_ccb->ccb_h.pinfo.priority; 5380 5381 switch (softc->action) { 5382 case PROBE_TUR: 5383 { 5384 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5385 5386 if (cam_periph_error(done_ccb, 0, 5387 SF_NO_PRINT, NULL) == ERESTART) 5388 return; 5389 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5390 /* Don't wedge the queue */ 5391 xpt_release_devq(done_ccb->ccb_h.path, 5392 /*count*/1, 5393 /*run_queue*/TRUE); 5394 } 5395 softc->action = PROBE_INQUIRY; 5396 xpt_release_ccb(done_ccb); 5397 xpt_schedule(periph, priority); 5398 return; 5399 } 5400 case PROBE_INQUIRY: 5401 { 5402 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5403 struct scsi_inquiry_data *inq_buf; 5404 u_int8_t periph_qual; 5405 u_int8_t periph_dtype; 5406 5407 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 5408 inq_buf = &path->device->inq_data; 5409 5410 periph_qual = SID_QUAL(inq_buf); 5411 periph_dtype = SID_TYPE(inq_buf); 5412 if (periph_dtype != T_NODEVICE) { 5413 switch(periph_qual) { 5414 case SID_QUAL_LU_CONNECTED: 5415 { 5416 xpt_find_quirk(path->device); 5417 5418 if ((inq_buf->flags & SID_CmdQue) != 0) 5419 softc->action = 5420 PROBE_MODE_SENSE; 5421 else 5422 softc->action = 5423 PROBE_SERIAL_NUM; 5424 5425 path->device->flags &= 5426 ~CAM_DEV_UNCONFIGURED; 5427 5428 xpt_release_ccb(done_ccb); 5429 xpt_schedule(periph, priority); 5430 return; 5431 } 5432 default: 5433 break; 5434 } 5435 } 5436 } else if (cam_periph_error(done_ccb, 0, 5437 done_ccb->ccb_h.target_lun > 0 5438 ? SF_RETRY_UA|SF_QUIET_IR 5439 : SF_RETRY_UA, 5440 &softc->saved_ccb) == ERESTART) { 5441 return; 5442 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5443 /* Don't wedge the queue */ 5444 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5445 /*run_queue*/TRUE); 5446 } 5447 /* 5448 * If we get to this point, we got an error status back 5449 * from the inquiry and the error status doesn't require 5450 * automatically retrying the command. Therefore, the 5451 * inquiry failed. 
If we had inquiry information before 5452 * for this device, but this latest inquiry command failed, 5453 * the device has probably gone away. If this device isn't 5454 * already marked unconfigured, notify the peripheral 5455 * drivers that this device is no more. 5456 */ 5457 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5458 /* Send the async notification. */ 5459 xpt_async(AC_LOST_DEVICE, path, NULL); 5460 5461 xpt_release_ccb(done_ccb); 5462 break; 5463 } 5464 case PROBE_MODE_SENSE: 5465 { 5466 struct ccb_scsiio *csio; 5467 struct scsi_mode_header_6 *mode_hdr; 5468 5469 csio = &done_ccb->csio; 5470 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 5471 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5472 struct scsi_control_page *page; 5473 u_int8_t *offset; 5474 5475 offset = ((u_int8_t *)&mode_hdr[1]) 5476 + mode_hdr->blk_desc_len; 5477 page = (struct scsi_control_page *)offset; 5478 path->device->queue_flags = page->queue_flags; 5479 } else if (cam_periph_error(done_ccb, 0, 5480 SF_RETRY_UA|SF_NO_PRINT, 5481 &softc->saved_ccb) == ERESTART) { 5482 return; 5483 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5484 /* Don't wedge the queue */ 5485 xpt_release_devq(done_ccb->ccb_h.path, 5486 /*count*/1, /*run_queue*/TRUE); 5487 } 5488 xpt_release_ccb(done_ccb); 5489 free(mode_hdr, M_TEMP); 5490 softc->action = PROBE_SERIAL_NUM; 5491 xpt_schedule(periph, priority); 5492 return; 5493 } 5494 case PROBE_SERIAL_NUM: 5495 { 5496 struct ccb_scsiio *csio; 5497 struct scsi_vpd_unit_serial_number *serial_buf; 5498 u_int32_t priority; 5499 int changed; 5500 int have_serialnum; 5501 5502 changed = 1; 5503 have_serialnum = 0; 5504 csio = &done_ccb->csio; 5505 priority = done_ccb->ccb_h.pinfo.priority; 5506 serial_buf = 5507 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 5508 5509 /* Clean up from previous instance of this device */ 5510 if (path->device->serial_num != NULL) { 5511 free(path->device->serial_num, M_DEVBUF); 5512 path->device->serial_num = NULL; 5513 path->device->serial_num_len = 0; 5514 } 5515 5516 if (serial_buf == NULL) { 5517 /* 5518 * Don't process the command as it was never sent 5519 */ 5520 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP 5521 && (serial_buf->length > 0)) { 5522 5523 have_serialnum = 1; 5524 path->device->serial_num = 5525 (u_int8_t *)malloc((serial_buf->length + 1), 5526 M_DEVBUF, M_NOWAIT); 5527 if (path->device->serial_num != NULL) { 5528 bcopy(serial_buf->serial_num, 5529 path->device->serial_num, 5530 serial_buf->length); 5531 path->device->serial_num_len = 5532 serial_buf->length; 5533 path->device->serial_num[serial_buf->length] 5534 = '\0'; 5535 } 5536 } else if (cam_periph_error(done_ccb, 0, 5537 SF_RETRY_UA|SF_NO_PRINT, 5538 &softc->saved_ccb) == ERESTART) { 5539 return; 5540 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5541 /* Don't wedge the queue */ 5542 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5543 /*run_queue*/TRUE); 5544 } 5545 5546 /* 5547 * Let's see if we have seen this device before. 
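 * The digest saved by probestart's PROBE_INQUIRY case is compared
 * below against one computed over the fresh inquiry and serial
 * number data; a mismatch is treated as a replaced device and,
 * unless announcements are suppressed, broadcast as AC_LOST_DEVICE.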
5548 */ 5549 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { 5550 MD5_CTX context; 5551 u_int8_t digest[16]; 5552 5553 MD5Init(&context); 5554 5555 MD5Update(&context, 5556 (unsigned char *)&path->device->inq_data, 5557 sizeof(struct scsi_inquiry_data)); 5558 5559 if (have_serialnum) 5560 MD5Update(&context, serial_buf->serial_num, 5561 serial_buf->length); 5562 5563 MD5Final(digest, &context); 5564 if (bcmp(softc->digest, digest, 16) == 0) 5565 changed = 0; 5566 5567 /* 5568 * XXX Do we need to do a TUR in order to ensure 5569 * that the device really hasn't changed??? 5570 */ 5571 if ((changed != 0) 5572 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) 5573 xpt_async(AC_LOST_DEVICE, path, NULL); 5574 } 5575 if (serial_buf != NULL) 5576 free(serial_buf, M_TEMP); 5577 5578 if (changed != 0) { 5579 /* 5580 * Now that we have all the necessary 5581 * information to safely perform transfer 5582 * negotiations... Controllers don't perform 5583 * any negotiation or tagged queuing until 5584 * after the first XPT_SET_TRAN_SETTINGS ccb is 5585 * received. So, on a new device, just retrieve 5586 * the user settings, and set them as the current 5587 * settings to set the device up. 5588 */ 5589 proberequestdefaultnegotiation(periph); 5590 xpt_release_ccb(done_ccb); 5591 5592 /* 5593 * Perform a TUR to allow the controller to 5594 * perform any necessary transfer negotiation. 5595 */ 5596 softc->action = PROBE_TUR_FOR_NEGOTIATION; 5597 xpt_schedule(periph, priority); 5598 return; 5599 } 5600 xpt_release_ccb(done_ccb); 5601 break; 5602 } 5603 case PROBE_TUR_FOR_NEGOTIATION: 5604 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5605 /* Don't wedge the queue */ 5606 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5607 /*run_queue*/TRUE); 5608 } 5609 5610 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 5611 5612 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 5613 /* Inform the XPT that a new device has been found */ 5614 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5615 xpt_action(done_ccb); 5616 5617 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb); 5618 } 5619 xpt_release_ccb(done_ccb); 5620 break; 5621 } 5622 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5623 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); 5624 done_ccb->ccb_h.status = CAM_REQ_CMP; 5625 xpt_done(done_ccb); 5626 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { 5627 cam_periph_invalidate(periph); 5628 cam_periph_release(periph); 5629 } else { 5630 probeschedule(periph); 5631 } 5632 } 5633 5634 static void 5635 probecleanup(struct cam_periph *periph) 5636 { 5637 free(periph->softc, M_TEMP); 5638 } 5639 5640 static void 5641 xpt_find_quirk(struct cam_ed *device) 5642 { 5643 caddr_t match; 5644 5645 match = cam_quirkmatch((caddr_t)&device->inq_data, 5646 (caddr_t)xpt_quirk_table, 5647 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table), 5648 sizeof(*xpt_quirk_table), scsi_inquiry_match); 5649 5650 if (match == NULL) 5651 panic("xpt_find_quirk: device didn't match wildcard entry!!"); 5652 5653 device->quirk = (struct xpt_quirk_entry *)match; 5654 } 5655 5656 static void 5657 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, 5658 int async_update) 5659 { 5660 struct cam_sim *sim; 5661 int qfrozen; 5662 5663 sim = cts->ccb_h.path->bus->sim; 5664 if (async_update == FALSE) { 5665 struct scsi_inquiry_data *inq_data; 5666 struct ccb_pathinq cpi; 5667 struct ccb_trans_settings cur_cts; 5668 5669 if (device == NULL) { 5670 cts->ccb_h.status = CAM_PATH_INVALID; 5671
xpt_done((union ccb *)cts); 5672 return; 5673 } 5674 5675 /* 5676 * Perform sanity checking against what the 5677 * controller and device can do. 5678 */ 5679 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1); 5680 cpi.ccb_h.func_code = XPT_PATH_INQ; 5681 xpt_action((union ccb *)&cpi); 5682 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1); 5683 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5684 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS; 5685 xpt_action((union ccb *)&cur_cts); 5686 inq_data = &device->inq_data; 5687 5688 /* Fill in any gaps in what the user gave us */ 5689 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) 5690 cts->sync_period = cur_cts.sync_period; 5691 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) 5692 cts->sync_offset = cur_cts.sync_offset; 5693 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0) 5694 cts->bus_width = cur_cts.bus_width; 5695 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) { 5696 cts->flags &= ~CCB_TRANS_DISC_ENB; 5697 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB; 5698 } 5699 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) { 5700 cts->flags &= ~CCB_TRANS_TAG_ENB; 5701 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB; 5702 } 5703 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 5704 && (inq_data->flags & SID_Sync) == 0) 5705 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { 5706 /* Force async */ 5707 cts->sync_period = 0; 5708 cts->sync_offset = 0; 5709 } 5710 5711 switch (cts->bus_width) { 5712 case MSG_EXT_WDTR_BUS_32_BIT: 5713 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5714 || (inq_data->flags & SID_WBus32) != 0) 5715 && (cpi.hba_inquiry & PI_WIDE_32) != 0) 5716 break; 5717 /* Fall Through to 16-bit */ 5718 case MSG_EXT_WDTR_BUS_16_BIT: 5719 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5720 || (inq_data->flags & SID_WBus16) != 0) 5721 && (cpi.hba_inquiry & PI_WIDE_16) != 0) { 5722 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 5723 break; 5724 } 5725 /* Fall Through to 8-bit */ 5726 default: /* New bus width?? */ 5727 case MSG_EXT_WDTR_BUS_8_BIT: 5728 /* All targets can do this */ 5729 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 5730 break; 5731 } 5732 5733 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) { 5734 /* 5735 * Can't tag queue without disconnection. 5736 */ 5737 cts->flags &= ~CCB_TRANS_TAG_ENB; 5738 cts->valid |= CCB_TRANS_TQ_VALID; 5739 } 5740 5741 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 5742 || (inq_data->flags & SID_CmdQue) == 0 5743 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 5744 || (device->quirk->mintags == 0)) { 5745 /* 5746 * Can't tag on hardware that doesn't support tags, 5747 * doesn't have them enabled, or has broken tag support. 5748 */ 5749 cts->flags &= ~CCB_TRANS_TAG_ENB; 5750 } 5751 } 5752 5753 qfrozen = FALSE; 5754 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0 5755 && (async_update == FALSE)) { 5756 int device_tagenb; 5757 5758 /* 5759 * If we are transitioning from tags to no-tags or 5760 * vice-versa, we need to carefully freeze and restart 5761 * the queue so that we don't overlap tagged and non-tagged 5762 * commands. We also temporarily stop tags if there is 5763 * a change in transfer negotiation settings to allow 5764 * "tag-less" negotiation.
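 *
 * The device queue freeze taken for the untagged transition below is
 * not released directly; an XPT_REL_SIMQ CCB with
 * RELSIM_RELEASE_AFTER_QEMPTY is issued at the end of this function,
 * so the queue restarts only after the outstanding tagged commands
 * have drained.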
5765 */ 5766 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5767 || (device->inq_flags & SID_CmdQue) != 0) 5768 device_tagenb = TRUE; 5769 else 5770 device_tagenb = FALSE; 5771 5772 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0 5773 && device_tagenb == FALSE) 5774 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0 5775 && device_tagenb == TRUE)) { 5776 5777 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 5778 /* 5779 * Delay change to use tags until after a 5780 * few commands have gone to this device so 5781 * the controller has time to perform transfer 5782 * negotiations without tagged messages getting 5783 * in the way. 5784 */ 5785 device->tag_delay_count = CAM_TAG_DELAY_COUNT; 5786 device->flags |= CAM_DEV_TAG_AFTER_COUNT; 5787 } else { 5788 xpt_freeze_devq(cts->ccb_h.path, /*count*/1); 5789 qfrozen = TRUE; 5790 device->inq_flags &= ~SID_CmdQue; 5791 xpt_dev_ccbq_resize(cts->ccb_h.path, 5792 sim->max_dev_openings); 5793 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5794 device->tag_delay_count = 0; 5795 } 5796 } 5797 } 5798 5799 if (async_update == FALSE) { 5800 /* 5801 * If we are currently performing tagged transactions to 5802 * this device and want to change its negotiation parameters, 5803 * go non-tagged for a bit to give the controller a chance to 5804 * negotiate unhampered by tag messages. 5805 */ 5806 if ((device->inq_flags & SID_CmdQue) != 0 5807 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID| 5808 CCB_TRANS_SYNC_OFFSET_VALID| 5809 CCB_TRANS_BUS_WIDTH_VALID)) != 0) 5810 xpt_toggle_tags(cts->ccb_h.path); 5811 5812 (*(sim->sim_action))(sim, (union ccb *)cts); 5813 } 5814 5815 if (qfrozen) { 5816 struct ccb_relsim crs; 5817 5818 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path, 5819 /*priority*/1); 5820 crs.ccb_h.func_code = XPT_REL_SIMQ; 5821 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5822 crs.openings 5823 = crs.release_timeout 5824 = crs.qfrozen_cnt 5825 = 0; 5826 xpt_action((union ccb *)&crs); 5827 } 5828 } 5829 5830 static void 5831 xpt_toggle_tags(struct cam_path *path) 5832 { 5833 struct cam_ed *dev; 5834 5835 /* 5836 * Give controllers a chance to renegotiate 5837 * before starting tag operations. We 5838 * "toggle" tagged queuing off then on 5839 * which causes the tag enable command delay 5840 * counter to come into effect. 
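 * While CAM_DEV_TAG_AFTER_COUNT is set, camisr decrements
 * tag_delay_count as each untagged command completes and calls
 * xpt_start_tags when it reaches zero, which re-enables tagged
 * queuing and resizes the ccb queue to the device's tagged depth.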
5841 */ 5842 dev = path->device; 5843 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5844 || ((dev->inq_flags & SID_CmdQue) != 0 5845 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { 5846 struct ccb_trans_settings cts; 5847 5848 xpt_setup_ccb(&cts.ccb_h, path, 1); 5849 cts.flags = 0; 5850 cts.valid = CCB_TRANS_TQ_VALID; 5851 xpt_set_transfer_settings(&cts, path->device, 5852 /*async_update*/TRUE); 5853 cts.flags = CCB_TRANS_TAG_ENB; 5854 xpt_set_transfer_settings(&cts, path->device, 5855 /*async_update*/TRUE); 5856 } 5857 } 5858 5859 static void 5860 xpt_start_tags(struct cam_path *path) 5861 { 5862 struct ccb_relsim crs; 5863 struct cam_ed *device; 5864 struct cam_sim *sim; 5865 int newopenings; 5866 5867 device = path->device; 5868 sim = path->bus->sim; 5869 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5870 xpt_freeze_devq(path, /*count*/1); 5871 device->inq_flags |= SID_CmdQue; 5872 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings); 5873 xpt_dev_ccbq_resize(path, newopenings); 5874 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1); 5875 crs.ccb_h.func_code = XPT_REL_SIMQ; 5876 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5877 crs.openings 5878 = crs.release_timeout 5879 = crs.qfrozen_cnt 5880 = 0; 5881 xpt_action((union ccb *)&crs); 5882 } 5883 5884 static int busses_to_config; 5885 static int busses_to_reset; 5886 5887 static int 5888 xptconfigbuscountfunc(struct cam_eb *bus, void *arg) 5889 { 5890 if (bus->path_id != CAM_XPT_PATH_ID) { 5891 struct cam_path path; 5892 struct ccb_pathinq cpi; 5893 int can_negotiate; 5894 5895 busses_to_config++; 5896 xpt_compile_path(&path, NULL, bus->path_id, 5897 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5898 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 5899 cpi.ccb_h.func_code = XPT_PATH_INQ; 5900 xpt_action((union ccb *)&cpi); 5901 can_negotiate = cpi.hba_inquiry; 5902 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 5903 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0 5904 && can_negotiate) 5905 busses_to_reset++; 5906 xpt_release_path(&path); 5907 } 5908 5909 return(1); 5910 } 5911 5912 static int 5913 xptconfigfunc(struct cam_eb *bus, void *arg) 5914 { 5915 struct cam_path *path; 5916 union ccb *work_ccb; 5917 5918 if (bus->path_id != CAM_XPT_PATH_ID) { 5919 cam_status status; 5920 int can_negotiate; 5921 5922 work_ccb = xpt_alloc_ccb(); 5923 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id, 5924 CAM_TARGET_WILDCARD, 5925 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){ 5926 printf("xptconfigfunc: xpt_create_path failed with " 5927 "status %#x for bus %d\n", status, bus->path_id); 5928 printf("xptconfigfunc: halting bus configuration\n"); 5929 xpt_free_ccb(work_ccb); 5930 busses_to_config--; 5931 xpt_finishconfig(xpt_periph, NULL); 5932 return(0); 5933 } 5934 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 5935 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5936 xpt_action(work_ccb); 5937 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5938 printf("xptconfigfunc: CPI failed on bus %d " 5939 "with status %d\n", bus->path_id, 5940 work_ccb->ccb_h.status); 5941 xpt_finishconfig(xpt_periph, work_ccb); 5942 return(1); 5943 } 5944 5945 can_negotiate = work_ccb->cpi.hba_inquiry; 5946 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 5947 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0 5948 && (can_negotiate != 0)) { 5949 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 5950 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 5951 work_ccb->ccb_h.cbfcnp = NULL; 5952 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE, 5953 
("Resetting Bus\n")); 5954 xpt_action(work_ccb); 5955 xpt_finishconfig(xpt_periph, work_ccb); 5956 } else { 5957 /* Act as though we performed a successful BUS RESET */ 5958 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 5959 xpt_finishconfig(xpt_periph, work_ccb); 5960 } 5961 } 5962 5963 return(1); 5964 } 5965 5966 static void 5967 xpt_config(void *arg) 5968 { 5969 /* Now that interrupts are enabled, go find our devices */ 5970 5971 #ifdef CAMDEBUG 5972 /* Setup debugging flags and path */ 5973 #ifdef CAM_DEBUG_FLAGS 5974 cam_dflags = CAM_DEBUG_FLAGS; 5975 #else /* !CAM_DEBUG_FLAGS */ 5976 cam_dflags = CAM_DEBUG_NONE; 5977 #endif /* CAM_DEBUG_FLAGS */ 5978 #ifdef CAM_DEBUG_BUS 5979 if (cam_dflags != CAM_DEBUG_NONE) { 5980 if (xpt_create_path(&cam_dpath, xpt_periph, 5981 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5982 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5983 printf("xpt_config: xpt_create_path() failed for debug" 5984 " target %d:%d:%d, debugging disabled\n", 5985 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5986 cam_dflags = CAM_DEBUG_NONE; 5987 } 5988 } else 5989 cam_dpath = NULL; 5990 #else /* !CAM_DEBUG_BUS */ 5991 cam_dpath = NULL; 5992 #endif /* CAM_DEBUG_BUS */ 5993 #endif /* CAMDEBUG */ 5994 5995 /* 5996 * Scan all installed busses. 5997 */ 5998 xpt_for_all_busses(xptconfigbuscountfunc, NULL); 5999 6000 if (busses_to_config == 0) { 6001 /* Call manually because we don't have any busses */ 6002 xpt_finishconfig(xpt_periph, NULL); 6003 } else { 6004 if (busses_to_reset > 0 && SCSI_DELAY >= 2000) { 6005 printf("Waiting %d seconds for SCSI " 6006 "devices to settle\n", SCSI_DELAY/1000); 6007 } 6008 xpt_for_all_busses(xptconfigfunc, NULL); 6009 } 6010 } 6011 6012 /* 6013 * If the given device only has one peripheral attached to it, and if that 6014 * peripheral is the passthrough driver, announce it. This ensures that the 6015 * user sees some sort of announcement for every peripheral in their system. 6016 */ 6017 static int 6018 xptpassannouncefunc(struct cam_ed *device, void *arg) 6019 { 6020 struct cam_periph *periph; 6021 int i; 6022 6023 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 6024 periph = SLIST_NEXT(periph, periph_links), i++); 6025 6026 periph = SLIST_FIRST(&device->periphs); 6027 if ((i == 1) 6028 && (strncmp(periph->periph_name, "pass", 4) == 0)) 6029 xpt_announce_periph(periph, NULL); 6030 6031 return(1); 6032 } 6033 6034 static void 6035 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb) 6036 { 6037 struct periph_driver **p_drv; 6038 int i; 6039 6040 if (done_ccb != NULL) { 6041 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 6042 ("xpt_finishconfig\n")); 6043 switch(done_ccb->ccb_h.func_code) { 6044 case XPT_RESET_BUS: 6045 if (done_ccb->ccb_h.status == CAM_REQ_CMP) { 6046 done_ccb->ccb_h.func_code = XPT_SCAN_BUS; 6047 done_ccb->ccb_h.cbfcnp = xpt_finishconfig; 6048 xpt_action(done_ccb); 6049 return; 6050 } 6051 /* FALLTHROUGH */ 6052 case XPT_SCAN_BUS: 6053 default: 6054 xpt_free_path(done_ccb->ccb_h.path); 6055 busses_to_config--; 6056 break; 6057 } 6058 } 6059 6060 if (busses_to_config == 0) { 6061 /* Register all the peripheral drivers */ 6062 /* XXX This will have to change when we have loadable modules */ 6063 p_drv = (struct periph_driver **)periphdriver_set.ls_items; 6064 for (i = 0; p_drv[i] != NULL; i++) { 6065 (*p_drv[i]->init)(); 6066 } 6067 6068 /* 6069 * Check for devices with no "standard" peripheral driver 6070 * attached. For any devices like that, announce the 6071 * passthrough driver so the user will see something.
static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct periph_driver **p_drv;
	int i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch(done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		free(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}

/*
 * These should only be called by the machine interrupt dispatch routines,
 * so their prototypes live here instead of in the header.
 */

static void
swi_camnet(void)
{
	camisr(&cam_netq);
}

static void
swi_cambio(void)
{
	camisr(&cam_bioq);
}
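
/*
 * Completed CCBs are not finalized in the SIM's hardware interrupt
 * handler.  Instead, xpt_done() queues the CCB header on cam_bioq or
 * cam_netq and posts the matching software interrupt, which lands in
 * one of the swi_* stubs above.  camisr() below then drains the queue
 * at splcam, doing the transport-layer bookkeeping before handing each
 * CCB to its peripheral driver's callback.  In this version of the code
 * the handlers are expected to be attached during transport setup,
 * along the lines of (sketch, not a verbatim excerpt):
 *
 *	register_swi(SWI_CAMNET, swi_camnet);
 *	register_swi(SWI_CAMBIO, swi_cambio);
 */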
static void
camisr(cam_isrq_t *queue)
{
	int s;
	struct ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr\n"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			struct cam_ed *device;
			union ccb *send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
			&& (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph,
				 (union ccb *)ccb_h);

		/* Raise IPL again for the loop-condition test */
		s = splcam();
	}
	splx(s);
}
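
/*
 * For reference, the producer side of the queues drained above: a SIM
 * completes a CCB by filling in its status and calling xpt_done(),
 * typically from its hardware interrupt handler.  A minimal sketch,
 * with the surrounding handler assumed rather than shown:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * xpt_done() only enqueues the CCB and schedules the software
 * interrupt; all of the heavier completion work is deferred to
 * camisr() above.
 */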