1 /* 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/types.h> 34 #include <sys/malloc.h> 35 #include <sys/device.h> 36 #include <sys/kernel.h> 37 #include <sys/time.h> 38 #include <sys/conf.h> 39 #include <sys/fcntl.h> 40 #include <sys/md5.h> 41 #include <sys/devicestat.h> 42 #include <sys/interrupt.h> 43 44 #ifdef PC98 45 #include <pc98/pc98/pc98_machdep.h> /* geometry translation */ 46 #endif 47 48 #include <machine/clock.h> 49 #include <machine/ipl.h> 50 51 #include <cam/cam.h> 52 #include <cam/cam_conf.h> 53 #include <cam/cam_ccb.h> 54 #include <cam/cam_periph.h> 55 #include <cam/cam_sim.h> 56 #include <cam/cam_xpt.h> 57 #include <cam/cam_xpt_sim.h> 58 #include <cam/cam_xpt_periph.h> 59 #include <cam/cam_debug.h> 60 61 #include <cam/scsi/scsi_all.h> 62 #include <cam/scsi/scsi_message.h> 63 #include <cam/scsi/scsi_pass.h> 64 #include "opt_cam.h" 65 66 /* Datastructures internal to the xpt layer */ 67 68 /* 69 * Definition of an async handler callback block. These are used to add 70 * SIMs and peripherals to the async callback lists. 71 */ 72 struct async_node { 73 SLIST_ENTRY(async_node) links; 74 u_int32_t event_enable; /* Async Event enables */ 75 void (*callback)(void *arg, u_int32_t code, 76 struct cam_path *path, void *args); 77 void *callback_arg; 78 }; 79 80 SLIST_HEAD(async_list, async_node); 81 SLIST_HEAD(periph_list, cam_periph); 82 static STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq; 83 84 /* 85 * This is the maximum number of high powered commands (e.g. start unit) 86 * that can be outstanding at a particular time. 87 */ 88 #ifndef CAM_MAX_HIGHPOWER 89 #define CAM_MAX_HIGHPOWER 4 90 #endif 91 92 /* number of high powered commands that can go through right now */ 93 static int num_highpower = CAM_MAX_HIGHPOWER; 94 95 /* 96 * Structure for queueing a device in a run queue. 97 * There is one run queue for allocating new ccbs, 98 * and another for sending ccbs to the controller. 
99 */ 100 struct cam_ed_qinfo { 101 cam_pinfo pinfo; 102 struct cam_ed *device; 103 }; 104 105 /* 106 * The CAM EDT (Existing Device Table) contains the device information for 107 * all devices for all busses in the system. The table contains a 108 * cam_ed structure for each device on the bus. 109 */ 110 struct cam_ed { 111 TAILQ_ENTRY(cam_ed) links; 112 struct cam_ed_qinfo alloc_ccb_entry; 113 struct cam_ed_qinfo send_ccb_entry; 114 struct cam_et *target; 115 lun_id_t lun_id; 116 struct camq drvq; /* 117 * Queue of type drivers wanting to do 118 * work on this device. 119 */ 120 struct cam_ccbq ccbq; /* Queue of pending ccbs */ 121 struct async_list asyncs; /* Async callback info for this B/T/L */ 122 struct periph_list periphs; /* All attached devices */ 123 u_int generation; /* Generation number */ 124 struct cam_periph *owner; /* Peripheral driver's ownership tag */ 125 struct xpt_quirk_entry *quirk; /* Oddities about this device */ 126 /* Storage for the inquiry data */ 127 struct scsi_inquiry_data inq_data; 128 u_int8_t inq_flags; /* 129 * Current settings for inquiry flags. 130 * This allows us to override settings 131 * like disconnection and tagged 132 * queuing for a device. 133 */ 134 u_int8_t queue_flags; /* Queue flags from the control page */ 135 u_int8_t *serial_num; 136 u_int8_t serial_num_len; 137 u_int32_t qfrozen_cnt; 138 u_int32_t flags; 139 #define CAM_DEV_UNCONFIGURED 0x01 140 #define CAM_DEV_REL_TIMEOUT_PENDING 0x02 141 #define CAM_DEV_REL_ON_COMPLETE 0x04 142 #define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08 143 #define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10 144 #define CAM_DEV_TAG_AFTER_COUNT 0x20 145 #define CAM_DEV_INQUIRY_DATA_VALID 0x40 146 u_int32_t tag_delay_count; 147 #define CAM_TAG_DELAY_COUNT 5 148 u_int32_t refcount; 149 struct callout_handle c_handle; 150 }; 151 152 /* 153 * Each target is represented by an ET (Existing Target). These 154 * entries are created when a target is successfully probed with an 155 * identify, and removed when a device fails to respond after a number 156 * of retries, or a bus rescan finds the device missing. 157 */ 158 struct cam_et { 159 TAILQ_HEAD(, cam_ed) ed_entries; 160 TAILQ_ENTRY(cam_et) links; 161 struct cam_eb *bus; 162 target_id_t target_id; 163 u_int32_t refcount; 164 u_int generation; 165 struct timeval last_reset; 166 }; 167 168 /* 169 * Each bus is represented by an EB (Existing Bus). These entries 170 * are created by calls to xpt_bus_register and deleted by calls to 171 * xpt_bus_deregister. 
172 */ 173 struct cam_eb { 174 TAILQ_HEAD(, cam_et) et_entries; 175 TAILQ_ENTRY(cam_eb) links; 176 path_id_t path_id; 177 struct cam_sim *sim; 178 struct timeval last_reset; 179 u_int32_t flags; 180 #define CAM_EB_RUNQ_SCHEDULED 0x01 181 u_int32_t refcount; 182 u_int generation; 183 }; 184 185 struct cam_path { 186 struct cam_periph *periph; 187 struct cam_eb *bus; 188 struct cam_et *target; 189 struct cam_ed *device; 190 }; 191 192 struct xpt_quirk_entry { 193 struct scsi_inquiry_pattern inq_pat; 194 u_int8_t quirks; 195 #define CAM_QUIRK_NOLUNS 0x01 196 #define CAM_QUIRK_NOSERIAL 0x02 197 #define CAM_QUIRK_HILUNS 0x04 198 u_int mintags; 199 u_int maxtags; 200 }; 201 #define CAM_SCSI2_MAXLUN 8 202 203 typedef enum { 204 XPT_FLAG_OPEN = 0x01 205 } xpt_flags; 206 207 struct xpt_softc { 208 xpt_flags flags; 209 u_int32_t generation; 210 }; 211 212 static const char quantum[] = "QUANTUM"; 213 static const char sony[] = "SONY"; 214 static const char west_digital[] = "WDIGTL"; 215 static const char samsung[] = "SAMSUNG"; 216 static const char seagate[] = "SEAGATE"; 217 static const char microp[] = "MICROP"; 218 219 static struct xpt_quirk_entry xpt_quirk_table[] = 220 { 221 { 222 /* Reports QUEUE FULL for temporary resource shortages */ 223 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" }, 224 /*quirks*/0, /*mintags*/24, /*maxtags*/32 225 }, 226 { 227 /* Reports QUEUE FULL for temporary resource shortages */ 228 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" }, 229 /*quirks*/0, /*mintags*/24, /*maxtags*/32 230 }, 231 { 232 /* Reports QUEUE FULL for temporary resource shortages */ 233 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" }, 234 /*quirks*/0, /*mintags*/24, /*maxtags*/32 235 }, 236 { 237 /* Broken tagged queuing drive */ 238 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" }, 239 /*quirks*/0, /*mintags*/0, /*maxtags*/0 240 }, 241 { 242 /* Broken tagged queuing drive */ 243 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" }, 244 /*quirks*/0, /*mintags*/0, /*maxtags*/0 245 }, 246 { 247 /* Broken tagged queuing drive */ 248 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" }, 249 /*quirks*/0, /*mintags*/0, /*maxtags*/0 250 }, 251 { 252 /* 253 * Unfortunately, the Quantum Atlas III has the same 254 * problem as the Atlas II drives above. 255 * Reported by: "Johan Granlund" <johan@granlund.nu> 256 * 257 * For future reference, the drive with the problem was: 258 * QUANTUM QM39100TD-SW N1B0 259 * 260 * It's possible that Quantum will fix the problem in later 261 * firmware revisions. If that happens, the quirk entry 262 * will need to be made specific to the firmware revisions 263 * with the problem. 264 * 265 */ 266 /* Reports QUEUE FULL for temporary resource shortages */ 267 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" }, 268 /*quirks*/0, /*mintags*/24, /*maxtags*/32 269 }, 270 { 271 /* 272 * 18 Gig Atlas III, same problem as the 9G version. 
	 * Reported by: Andre Albsmeier
	 *		<andre.albsmeier@mchp.siemens.de>
	 *
	 * For future reference, the drive with the problem was:
	 * QUANTUM QM318000TD-S N491
	 */
	/* Reports QUEUE FULL for temporary resource shortages */
	{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
	/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	/*
	 * The Seagate Medalist Pro drives have very poor write
	 * performance with anything more than 2 tags.
	 *
	 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
	 * Drive: <SEAGATE ST36530N 1444>
	 *
	 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
	 * Drive: <SEAGATE ST34520W 1281>
	 *
	 * No one has actually reported that the 9G version
	 * (ST39140*) of the Medalist Pro has the same problem, but
	 * we're assuming that it does because the 4G and 6.5G
	 * versions of the drive are broken.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
		/*quirks*/0, /*mintags*/2, /*maxtags*/2
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  Write performance
		 * steadily drops off with more and more concurrent
		 * transactions.  Best sequential write performance with
		 * tagged queueing turned off and write caching turned on.
		 *
		 * PR: kern/10398
		 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
		 * Drive: DCAS-34330 w/ "S65A" firmware.
		 *
		 * The drive with the problem had the "S65A" firmware
		 * revision, and has also been reported (by Stephen J.
		 * Roznowski <sjr@home.net>) for a drive with the "S61A"
		 * firmware revision.
		 *
		 * Although no one has reported problems with the 2 gig
		 * version of the DCAS drive, the assumption is that it
		 * has the same problems as the 4 gig version.  Therefore
		 * this quirk entry disables tagged queueing for all
		 * DCAS drives.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive.
		 * Submitted by:
		 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
		 * in PR kern/9535
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled. (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 * Best performance with these drives is achieved with
		 * tagged queueing turned off, and write caching turned on.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/* Really only one LUN */
		{ T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{ T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * Many Sony CDROM drives don't like multi-LUN probing.
		 */
		{ T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{ T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This old revision of the TDC3600 is also SCSI-1, and
		 * hangs upon serial number probing.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
			" TDC 3600", "U07:"
		},
		CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
			"CP150", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Would respond to all LUNs if asked for.
456 */ 457 { 458 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY", 459 "96X2*", "*" 460 }, 461 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 462 }, 463 { 464 /* Submitted by: Matthew Dodd <winter@jurai.net> */ 465 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" }, 466 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 467 }, 468 { 469 /* Submitted by: Matthew Dodd <winter@jurai.net> */ 470 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" }, 471 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0 472 }, 473 { 474 /* Default tagged queuing parameters for all devices */ 475 { 476 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, 477 /*vendor*/"*", /*product*/"*", /*revision*/"*" 478 }, 479 /*quirks*/0, /*mintags*/2, /*maxtags*/255 480 }, 481 }; 482 483 static const int xpt_quirk_table_size = 484 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table); 485 486 typedef enum { 487 DM_RET_COPY = 0x01, 488 DM_RET_FLAG_MASK = 0x0f, 489 DM_RET_NONE = 0x00, 490 DM_RET_STOP = 0x10, 491 DM_RET_DESCEND = 0x20, 492 DM_RET_ERROR = 0x30, 493 DM_RET_ACTION_MASK = 0xf0 494 } dev_match_ret; 495 496 typedef enum { 497 XPT_DEPTH_BUS, 498 XPT_DEPTH_TARGET, 499 XPT_DEPTH_DEVICE, 500 XPT_DEPTH_PERIPH 501 } xpt_traverse_depth; 502 503 struct xpt_traverse_config { 504 xpt_traverse_depth depth; 505 void *tr_func; 506 void *tr_arg; 507 }; 508 509 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); 510 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); 511 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); 512 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); 513 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); 514 515 /* Transport layer configuration information */ 516 static struct xpt_softc xsoftc; 517 518 /* Queues for our software interrupt handler */ 519 typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t; 520 static cam_isrq_t cam_bioq; 521 static cam_isrq_t cam_netq; 522 523 /* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */ 524 static SLIST_HEAD(,ccb_hdr) ccb_freeq; 525 static u_int xpt_max_ccbs; /* 526 * Maximum size of ccb pool. Modified as 527 * devices are added/removed or have their 528 * opening counts changed. 
529 */ 530 static u_int xpt_ccb_count; /* Current count of allocated ccbs */ 531 532 static struct cam_periph *xpt_periph; 533 534 static periph_init_t xpt_periph_init; 535 536 static periph_init_t probe_periph_init; 537 538 static struct periph_driver xpt_driver = 539 { 540 xpt_periph_init, "xpt", 541 TAILQ_HEAD_INITIALIZER(xpt_driver.units) 542 }; 543 544 static struct periph_driver probe_driver = 545 { 546 probe_periph_init, "probe", 547 TAILQ_HEAD_INITIALIZER(probe_driver.units) 548 }; 549 550 DATA_SET(periphdriver_set, xpt_driver); 551 DATA_SET(periphdriver_set, probe_driver); 552 553 #define XPT_CDEV_MAJOR 104 554 555 static d_open_t xptopen; 556 static d_close_t xptclose; 557 static d_ioctl_t xptioctl; 558 559 static struct cdevsw xpt_cdevsw = { 560 /* open */ xptopen, 561 /* close */ xptclose, 562 /* read */ noread, 563 /* write */ nowrite, 564 /* ioctl */ xptioctl, 565 /* stop */ nostop, 566 /* reset */ noreset, 567 /* devtotty */ nodevtotty, 568 /* poll */ nopoll, 569 /* mmap */ nommap, 570 /* strategy */ nostrategy, 571 /* name */ "xpt", 572 /* parms */ noparms, 573 /* maj */ XPT_CDEV_MAJOR, 574 /* dump */ nodump, 575 /* psize */ nopsize, 576 /* flags */ 0, 577 /* maxio */ 0, 578 /* bmaj */ -1 579 }; 580 581 static struct intr_config_hook *xpt_config_hook; 582 583 /* Registered busses */ 584 static TAILQ_HEAD(,cam_eb) xpt_busses; 585 static u_int bus_generation; 586 587 /* Storage for debugging datastructures */ 588 #ifdef CAMDEBUG 589 struct cam_path *cam_dpath; 590 u_int32_t cam_dflags; 591 u_int32_t cam_debug_delay; 592 #endif 593 594 #if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG) 595 #error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS" 596 #endif 597 598 /* 599 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG 600 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS, 601 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified. 
602 */ 603 #if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \ 604 || defined(CAM_DEBUG_LUN) 605 #ifdef CAMDEBUG 606 #if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \ 607 || !defined(CAM_DEBUG_LUN) 608 #error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \ 609 and CAM_DEBUG_LUN" 610 #endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */ 611 #else /* !CAMDEBUG */ 612 #error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options" 613 #endif /* CAMDEBUG */ 614 #endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */ 615 616 /* Our boot-time initialization hook */ 617 static void xpt_init(void *); 618 SYSINIT(cam, SI_SUB_CONFIGURE, SI_ORDER_SECOND, xpt_init, NULL); 619 620 static cam_status xpt_compile_path(struct cam_path *new_path, 621 struct cam_periph *perph, 622 path_id_t path_id, 623 target_id_t target_id, 624 lun_id_t lun_id); 625 626 static void xpt_release_path(struct cam_path *path); 627 628 static void xpt_async_bcast(struct async_list *async_head, 629 u_int32_t async_code, 630 struct cam_path *path, 631 void *async_arg); 632 static int xptnextfreebus(path_id_t startbus); 633 static int xptpathid(const char *sim_name, int sim_unit, int sim_bus, 634 path_id_t *nextpath); 635 static union ccb *xpt_get_ccb(struct cam_ed *device); 636 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, 637 u_int32_t new_priority); 638 static void xpt_run_dev_allocq(struct cam_eb *bus); 639 static void xpt_run_dev_sendq(struct cam_eb *bus); 640 static timeout_t xpt_release_devq_timeout; 641 static timeout_t xpt_release_simq_timeout; 642 static void xpt_release_bus(struct cam_eb *bus); 643 static void xpt_release_devq_device(struct cam_ed *dev, u_int count, 644 int run_queue); 645 static struct cam_et* 646 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); 647 static void xpt_release_target(struct cam_eb *bus, struct cam_et *target); 648 static struct cam_ed* 649 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, 650 lun_id_t lun_id); 651 static void xpt_release_device(struct cam_eb *bus, struct cam_et *target, 652 struct cam_ed *device); 653 static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings); 654 static struct cam_eb* 655 xpt_find_bus(path_id_t path_id); 656 static struct cam_et* 657 xpt_find_target(struct cam_eb *bus, target_id_t target_id); 658 static struct cam_ed* 659 xpt_find_device(struct cam_et *target, lun_id_t lun_id); 660 static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb); 661 static void xpt_scan_lun(struct cam_periph *periph, 662 struct cam_path *path, cam_flags flags, 663 union ccb *ccb); 664 static void xptscandone(struct cam_periph *periph, union ccb *done_ccb); 665 static xpt_busfunc_t xptconfigbuscountfunc; 666 static xpt_busfunc_t xptconfigfunc; 667 static void xpt_config(void *arg); 668 static xpt_devicefunc_t xptpassannouncefunc; 669 static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb); 670 static void xptaction(struct cam_sim *sim, union ccb *work_ccb); 671 static swihand_t swi_camnet; 672 static swihand_t swi_cambio; 673 static void camisr(cam_isrq_t *queue); 674 #if 0 675 static void xptstart(struct cam_periph *periph, union ccb *work_ccb); 676 static void xptasync(struct cam_periph *periph, 677 u_int32_t code, cam_path *path); 678 #endif 679 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, 680 int num_patterns, struct cam_eb *bus); 681 static dev_match_ret xptdevicematch(struct dev_match_pattern 
*patterns, 682 int num_patterns, struct cam_ed *device); 683 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, 684 int num_patterns, 685 struct cam_periph *periph); 686 static xpt_busfunc_t xptedtbusfunc; 687 static xpt_targetfunc_t xptedttargetfunc; 688 static xpt_devicefunc_t xptedtdevicefunc; 689 static xpt_periphfunc_t xptedtperiphfunc; 690 static xpt_pdrvfunc_t xptplistpdrvfunc; 691 static xpt_periphfunc_t xptplistperiphfunc; 692 static int xptedtmatch(struct ccb_dev_match *cdm); 693 static int xptperiphlistmatch(struct ccb_dev_match *cdm); 694 static int xptbustraverse(struct cam_eb *start_bus, 695 xpt_busfunc_t *tr_func, void *arg); 696 static int xpttargettraverse(struct cam_eb *bus, 697 struct cam_et *start_target, 698 xpt_targetfunc_t *tr_func, void *arg); 699 static int xptdevicetraverse(struct cam_et *target, 700 struct cam_ed *start_device, 701 xpt_devicefunc_t *tr_func, void *arg); 702 static int xptperiphtraverse(struct cam_ed *device, 703 struct cam_periph *start_periph, 704 xpt_periphfunc_t *tr_func, void *arg); 705 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 706 xpt_pdrvfunc_t *tr_func, void *arg); 707 static int xptpdperiphtraverse(struct periph_driver **pdrv, 708 struct cam_periph *start_periph, 709 xpt_periphfunc_t *tr_func, 710 void *arg); 711 static xpt_busfunc_t xptdefbusfunc; 712 static xpt_targetfunc_t xptdeftargetfunc; 713 static xpt_devicefunc_t xptdefdevicefunc; 714 static xpt_periphfunc_t xptdefperiphfunc; 715 static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg); 716 #ifdef notusedyet 717 static int xpt_for_all_targets(xpt_targetfunc_t *tr_func, 718 void *arg); 719 #endif 720 static int xpt_for_all_devices(xpt_devicefunc_t *tr_func, 721 void *arg); 722 #ifdef notusedyet 723 static int xpt_for_all_periphs(xpt_periphfunc_t *tr_func, 724 void *arg); 725 #endif 726 static xpt_devicefunc_t xptsetasyncfunc; 727 static xpt_busfunc_t xptsetasyncbusfunc; 728 static cam_status xptregister(struct cam_periph *periph, 729 void *arg); 730 static cam_status proberegister(struct cam_periph *periph, 731 void *arg); 732 static void probeschedule(struct cam_periph *probe_periph); 733 static void probestart(struct cam_periph *periph, union ccb *start_ccb); 734 static void proberequestdefaultnegotiation(struct cam_periph *periph); 735 static void probedone(struct cam_periph *periph, union ccb *done_ccb); 736 static void probecleanup(struct cam_periph *periph); 737 static void xpt_find_quirk(struct cam_ed *device); 738 static void xpt_set_transfer_settings(struct ccb_trans_settings *cts, 739 struct cam_ed *device, 740 int async_update); 741 static void xpt_toggle_tags(struct cam_path *path); 742 static void xpt_start_tags(struct cam_path *path); 743 static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus, 744 struct cam_ed *dev); 745 static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus, 746 struct cam_ed *dev); 747 static __inline int periph_is_queued(struct cam_periph *periph); 748 static __inline int device_is_alloc_queued(struct cam_ed *device); 749 static __inline int device_is_send_queued(struct cam_ed *device); 750 static __inline int dev_allocq_is_runnable(struct cam_devq *devq); 751 752 static __inline int 753 xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev) 754 { 755 int retval; 756 757 if (dev->ccbq.devq_openings > 0) { 758 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) { 759 cam_ccbq_resize(&dev->ccbq, 760 dev->ccbq.dev_openings 761 + dev->ccbq.dev_active); 762 dev->flags &= 
					~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		/*
		 * The priority of a device waiting for CCB resources
		 * is that of the highest priority peripheral driver
		 * enqueued.
		 */
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  CAMQ_GET_HEAD(&dev->drvq)->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int retval;

	if (dev->ccbq.dev_openings > 0) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&bus->sim->devq->send_queue,
				     &dev->send_ccb_entry.pinfo,
				     CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}

static void
xpt_periph_init()
{
	cdevsw_add(&xpt_cdevsw);
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than unit 0, point out their mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than unit 0, point out their mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}
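
/*
 * Illustrative sketch (not part of this file): given the checks in
 * xptopen() above, a userland consumer opens the transport layer roughly
 * as follows.  The device node name /dev/xpt0 is the conventional name for
 * unit 0 and is an assumption here.
 *
 *	int fd;
 *
 *	fd = open("/dev/xpt0", O_RDWR);	   read-only or write-only opens
 *					   fail with EPERM, and O_NONBLOCK
 *					   is rejected with ENODEV
 *	if (fd < 0)
 *		err(1, "open");
 */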

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than unit 0, point out their mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
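		/*
		 * Illustrative sketch (not from the original source): a
		 * userland caller typically prepares the XPT_DEV_MATCH CCB
		 * handled below along these lines.  Field names follow the
		 * ccb_dev_match usage in this file; the buffer size is an
		 * arbitrary example value.
		 *
		 *	struct dev_match_result matches[100];
		 *	union ccb ccb;
		 *
		 *	bzero(&ccb, sizeof(ccb));
		 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
		 *	ccb.cdm.match_buf_len = sizeof(matches);
		 *	ccb.cdm.matches = matches;
		 *	ccb.cdm.num_matches = 0;
		 *	ccb.cdm.num_patterns = 0;	an empty pattern list
		 *					matches everything
		 *	ccb.cdm.pattern_buf_len = 0;
		 *	error = ioctl(fd, CAMIOCOMMAND, &ccb);
		 */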
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit number filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, and the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splcam protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		ccb = (union ccb *)addr;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
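
/*
 * Illustrative sketch (not part of this file): from userland, the
 * CAMGETPASSTHRU ioctl handled above is typically used to map a peripheral
 * instance (e.g. "da", unit 1) to its passthrough device.  The header names
 * are the conventional CAM includes and are assumptions here; the file
 * descriptor is obtained as in the open(2) sketch earlier.
 *
 *	#include <cam/cam.h>
 *	#include <cam/cam_ccb.h>
 *
 *	union ccb ccb;
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
 *	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
 *	ccb.cgdl.unit_number = 1;
 *	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0
 *	 && ccb.cgdl.status != CAM_GDEVLIST_ERROR)
 *		printf("pass device is %s%d\n", ccb.cgdl.periph_name,
 *		       ccb.cgdl.unit_number);
 */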
/* Functions accessed by the peripheral drivers */
static void
xpt_init(dummy)
	void *dummy;
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
					   M_DEVBUF, M_WAITOK);
	xpt_sim->sim_action = xptaction;
	xpt_sim->sim_name = "xpt";
	xpt_sim->path_id = CAM_XPT_PATH_ID;
	xpt_sim->bus_id = 0;
	xpt_sim->max_tagged_dev_openings = 0;
	xpt_sim->max_dev_openings = 0;
	xpt_sim->devq = cam_simq_alloc(16);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, 0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free (xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	register_swi(SWI_CAMNET, swi_camnet);
	register_swi(SWI_CAMBIO, swi_cambio);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;
	struct periph_list *periph_head;

	device = periph->path->device;

	periph_head = &device->periphs;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}
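
/*
 * Worked example (added for clarity, not in the original): the transfer
 * rate arithmetic in xpt_announce_periph() below.  scsi_calc_syncsrate()
 * yields the synchronous transfer frequency in kHz, so a 20MHz (Ultra)
 * connection on a 16 bit bus (bus_width == 1) works out to:
 *
 *	freq  = 20000			kHz
 *	speed = 20000 << 1 = 40000	KB/s
 *	mb    = 40000 / 1000 = 40
 *
 * which prints as "40.000MB/s transfers" with "(20.000MHz, offset ..., 16bit)"
 * appended.  For an asynchronous device the SIM's base_transfer_speed
 * (also in KB/s) is used instead, and freq is reported as 0.
 */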

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		  && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			struct ccb_pathinq cpi;

			/* Ask the SIM for its base transfer speed */
			xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
			cpi.ccb_h.func_code = XPT_PATH_INQ;
			xpt_action((union ccb *)&cpi);

			speed = cpi.base_transfer_speed;
			freq = 0;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in..
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}


static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
1528 */ 1529 if (bus == NULL) 1530 return(DM_RET_ERROR); 1531 1532 /* 1533 * If there are no match entries, then this bus matches no 1534 * matter what. 1535 */ 1536 if ((patterns == NULL) || (num_patterns == 0)) 1537 return(DM_RET_DESCEND | DM_RET_COPY); 1538 1539 for (i = 0; i < num_patterns; i++) { 1540 struct bus_match_pattern *cur_pattern; 1541 1542 /* 1543 * If the pattern in question isn't for a bus node, we 1544 * aren't interested. However, we do indicate to the 1545 * calling routine that we should continue descending the 1546 * tree, since the user wants to match against lower-level 1547 * EDT elements. 1548 */ 1549 if (patterns[i].type != DEV_MATCH_BUS) { 1550 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1551 retval |= DM_RET_DESCEND; 1552 continue; 1553 } 1554 1555 cur_pattern = &patterns[i].pattern.bus_pattern; 1556 1557 /* 1558 * If they want to match any bus node, we give them any 1559 * device node. 1560 */ 1561 if (cur_pattern->flags == BUS_MATCH_ANY) { 1562 /* set the copy flag */ 1563 retval |= DM_RET_COPY; 1564 1565 /* 1566 * If we've already decided on an action, go ahead 1567 * and return. 1568 */ 1569 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1570 return(retval); 1571 } 1572 1573 /* 1574 * Not sure why someone would do this... 1575 */ 1576 if (cur_pattern->flags == BUS_MATCH_NONE) 1577 continue; 1578 1579 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1580 && (cur_pattern->path_id != bus->path_id)) 1581 continue; 1582 1583 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1584 && (cur_pattern->bus_id != bus->sim->bus_id)) 1585 continue; 1586 1587 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1588 && (cur_pattern->unit_number != bus->sim->unit_number)) 1589 continue; 1590 1591 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1592 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1593 DEV_IDLEN) != 0)) 1594 continue; 1595 1596 /* 1597 * If we get to this point, the user definitely wants 1598 * information on this bus. So tell the caller to copy the 1599 * data out. 1600 */ 1601 retval |= DM_RET_COPY; 1602 1603 /* 1604 * If the return action has been set to descend, then we 1605 * know that we've already seen a non-bus matching 1606 * expression, therefore we need to further descend the tree. 1607 * This won't change by continuing around the loop, so we 1608 * go ahead and return. If we haven't seen a non-bus 1609 * matching expression, we keep going around the loop until 1610 * we exhaust the matching expressions. We'll set the stop 1611 * flag once we fall out of the loop. 1612 */ 1613 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1614 return(retval); 1615 } 1616 1617 /* 1618 * If the return action hasn't been set to descend yet, that means 1619 * we haven't seen anything other than bus matching patterns. So 1620 * tell the caller to stop descending the tree -- the user doesn't 1621 * want to match against lower level tree elements. 1622 */ 1623 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1624 retval |= DM_RET_STOP; 1625 1626 return(retval); 1627 } 1628 1629 static dev_match_ret 1630 xptdevicematch(struct dev_match_pattern *patterns, int num_patterns, 1631 struct cam_ed *device) 1632 { 1633 dev_match_ret retval; 1634 int i; 1635 1636 retval = DM_RET_NONE; 1637 1638 /* 1639 * If we aren't given something to match against, that's an error. 1640 */ 1641 if (device == NULL) 1642 return(DM_RET_ERROR); 1643 1644 /* 1645 * If there are no match entries, then this device matches no 1646 * matter what. 
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;


			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
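
/*
 * Illustrative sketch (not in the original source): a caller-supplied
 * pattern of the kind xptdevicematch() above evaluates.  The function and
 * the target/lun values are hypothetical example names/numbers.  With only
 * device patterns in the list, a matching device is reported (DM_RET_COPY)
 * and the search does not descend to peripherals (DM_RET_STOP is set when
 * the loop falls through).
 */
#if 0
static void
xpt_dev_match_example(void)
{
	struct dev_match_pattern pattern;

	pattern.type = DEV_MATCH_DEVICE;
	pattern.pattern.device_pattern.flags = DEV_MATCH_TARGET | DEV_MATCH_LUN;
	pattern.pattern.device_pattern.target_id = 3;
	pattern.pattern.device_pattern.target_lun = 0;
}
#endif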

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
1883 */ 1884 if (retval & DM_RET_COPY) { 1885 int spaceleft, j; 1886 1887 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1888 sizeof(struct dev_match_result)); 1889 1890 /* 1891 * If we don't have enough space to put in another 1892 * match result, save our position and tell the 1893 * user there are more devices to check. 1894 */ 1895 if (spaceleft < sizeof(struct dev_match_result)) { 1896 bzero(&cdm->pos, sizeof(cdm->pos)); 1897 cdm->pos.position_type = 1898 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1899 1900 cdm->pos.cookie.bus = bus; 1901 cdm->pos.generations[CAM_BUS_GENERATION]= 1902 bus_generation; 1903 cdm->status = CAM_DEV_MATCH_MORE; 1904 return(0); 1905 } 1906 j = cdm->num_matches; 1907 cdm->num_matches++; 1908 cdm->matches[j].type = DEV_MATCH_BUS; 1909 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1910 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1911 cdm->matches[j].result.bus_result.unit_number = 1912 bus->sim->unit_number; 1913 strncpy(cdm->matches[j].result.bus_result.dev_name, 1914 bus->sim->sim_name, DEV_IDLEN); 1915 } 1916 1917 /* 1918 * If the user is only interested in busses, there's no 1919 * reason to descend to the next level in the tree. 1920 */ 1921 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1922 return(1); 1923 1924 /* 1925 * If there is a target generation recorded, check it to 1926 * make sure the target list hasn't changed. 1927 */ 1928 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1929 && (bus == cdm->pos.cookie.bus) 1930 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1931 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0) 1932 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 1933 bus->generation)) { 1934 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1935 return(0); 1936 } 1937 1938 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1939 && (cdm->pos.cookie.bus == bus) 1940 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1941 && (cdm->pos.cookie.target != NULL)) 1942 return(xpttargettraverse(bus, 1943 (struct cam_et *)cdm->pos.cookie.target, 1944 xptedttargetfunc, arg)); 1945 else 1946 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg)); 1947 } 1948 1949 static int 1950 xptedttargetfunc(struct cam_et *target, void *arg) 1951 { 1952 struct ccb_dev_match *cdm; 1953 1954 cdm = (struct ccb_dev_match *)arg; 1955 1956 /* 1957 * If there is a device list generation recorded, check it to 1958 * make sure the device list hasn't changed. 
1959 */ 1960 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1961 && (cdm->pos.cookie.bus == target->bus) 1962 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1963 && (cdm->pos.cookie.target == target) 1964 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1965 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0) 1966 && (cdm->pos.generations[CAM_DEV_GENERATION] != 1967 target->generation)) { 1968 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1969 return(0); 1970 } 1971 1972 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1973 && (cdm->pos.cookie.bus == target->bus) 1974 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1975 && (cdm->pos.cookie.target == target) 1976 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1977 && (cdm->pos.cookie.device != NULL)) 1978 return(xptdevicetraverse(target, 1979 (struct cam_ed *)cdm->pos.cookie.device, 1980 xptedtdevicefunc, arg)); 1981 else 1982 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg)); 1983 } 1984 1985 static int 1986 xptedtdevicefunc(struct cam_ed *device, void *arg) 1987 { 1988 1989 struct ccb_dev_match *cdm; 1990 dev_match_ret retval; 1991 1992 cdm = (struct ccb_dev_match *)arg; 1993 1994 /* 1995 * If our position is for something deeper in the tree, that means 1996 * that we've already seen this node. So, we keep going down. 1997 */ 1998 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1999 && (cdm->pos.cookie.device == device) 2000 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2001 && (cdm->pos.cookie.periph != NULL)) 2002 retval = DM_RET_DESCEND; 2003 else 2004 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 2005 device); 2006 2007 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2008 cdm->status = CAM_DEV_MATCH_ERROR; 2009 return(0); 2010 } 2011 2012 /* 2013 * If the copy flag is set, copy this device out. 2014 */ 2015 if (retval & DM_RET_COPY) { 2016 int spaceleft, j; 2017 2018 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2019 sizeof(struct dev_match_result)); 2020 2021 /* 2022 * If we don't have enough space to put in another 2023 * match result, save our position and tell the 2024 * user there are more devices to check. 
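 * For illustration (hypothetical numbers): with a 4096 byte match
 * buffer and a 512 byte dev_match_result, the first eight matches fit;
 * on the ninth, spaceleft would be 0, so we record the bus, target and
 * device cookies plus their generations and return CAM_DEV_MATCH_MORE.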
2025 */ 2026 if (spaceleft < sizeof(struct dev_match_result)) { 2027 bzero(&cdm->pos, sizeof(cdm->pos)); 2028 cdm->pos.position_type = 2029 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2030 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 2031 2032 cdm->pos.cookie.bus = device->target->bus; 2033 cdm->pos.generations[CAM_BUS_GENERATION]= 2034 bus_generation; 2035 cdm->pos.cookie.target = device->target; 2036 cdm->pos.generations[CAM_TARGET_GENERATION] = 2037 device->target->bus->generation; 2038 cdm->pos.cookie.device = device; 2039 cdm->pos.generations[CAM_DEV_GENERATION] = 2040 device->target->generation; 2041 cdm->status = CAM_DEV_MATCH_MORE; 2042 return(0); 2043 } 2044 j = cdm->num_matches; 2045 cdm->num_matches++; 2046 cdm->matches[j].type = DEV_MATCH_DEVICE; 2047 cdm->matches[j].result.device_result.path_id = 2048 device->target->bus->path_id; 2049 cdm->matches[j].result.device_result.target_id = 2050 device->target->target_id; 2051 cdm->matches[j].result.device_result.target_lun = 2052 device->lun_id; 2053 bcopy(&device->inq_data, 2054 &cdm->matches[j].result.device_result.inq_data, 2055 sizeof(struct scsi_inquiry_data)); 2056 2057 /* Let the user know whether this device is unconfigured */ 2058 if (device->flags & CAM_DEV_UNCONFIGURED) 2059 cdm->matches[j].result.device_result.flags = 2060 DEV_RESULT_UNCONFIGURED; 2061 else 2062 cdm->matches[j].result.device_result.flags = 2063 DEV_RESULT_NOFLAG; 2064 } 2065 2066 /* 2067 * If the user isn't interested in peripherals, don't descend 2068 * the tree any further. 2069 */ 2070 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 2071 return(1); 2072 2073 /* 2074 * If there is a peripheral list generation recorded, make sure 2075 * it hasn't changed. 2076 */ 2077 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2078 && (device->target->bus == cdm->pos.cookie.bus) 2079 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2080 && (device->target == cdm->pos.cookie.target) 2081 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2082 && (device == cdm->pos.cookie.device) 2083 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2084 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2085 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2086 device->generation)){ 2087 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2088 return(0); 2089 } 2090 2091 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2092 && (cdm->pos.cookie.bus == device->target->bus) 2093 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 2094 && (cdm->pos.cookie.target == device->target) 2095 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 2096 && (cdm->pos.cookie.device == device) 2097 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2098 && (cdm->pos.cookie.periph != NULL)) 2099 return(xptperiphtraverse(device, 2100 (struct cam_periph *)cdm->pos.cookie.periph, 2101 xptedtperiphfunc, arg)); 2102 else 2103 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 2104 } 2105 2106 static int 2107 xptedtperiphfunc(struct cam_periph *periph, void *arg) 2108 { 2109 struct ccb_dev_match *cdm; 2110 dev_match_ret retval; 2111 2112 cdm = (struct ccb_dev_match *)arg; 2113 2114 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2115 2116 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2117 cdm->status = CAM_DEV_MATCH_ERROR; 2118 return(0); 2119 } 2120 2121 /* 2122 * If the copy flag is set, copy this peripheral out. 
2123 */ 2124 if (retval & DM_RET_COPY) { 2125 int spaceleft, j; 2126 2127 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2128 sizeof(struct dev_match_result)); 2129 2130 /* 2131 * If we don't have enough space to put in another 2132 * match result, save our position and tell the 2133 * user there are more devices to check. 2134 */ 2135 if (spaceleft < sizeof(struct dev_match_result)) { 2136 bzero(&cdm->pos, sizeof(cdm->pos)); 2137 cdm->pos.position_type = 2138 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 2139 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 2140 CAM_DEV_POS_PERIPH; 2141 2142 cdm->pos.cookie.bus = periph->path->bus; 2143 cdm->pos.generations[CAM_BUS_GENERATION]= 2144 bus_generation; 2145 cdm->pos.cookie.target = periph->path->target; 2146 cdm->pos.generations[CAM_TARGET_GENERATION] = 2147 periph->path->bus->generation; 2148 cdm->pos.cookie.device = periph->path->device; 2149 cdm->pos.generations[CAM_DEV_GENERATION] = 2150 periph->path->target->generation; 2151 cdm->pos.cookie.periph = periph; 2152 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2153 periph->path->device->generation; 2154 cdm->status = CAM_DEV_MATCH_MORE; 2155 return(0); 2156 } 2157 2158 j = cdm->num_matches; 2159 cdm->num_matches++; 2160 cdm->matches[j].type = DEV_MATCH_PERIPH; 2161 cdm->matches[j].result.periph_result.path_id = 2162 periph->path->bus->path_id; 2163 cdm->matches[j].result.periph_result.target_id = 2164 periph->path->target->target_id; 2165 cdm->matches[j].result.periph_result.target_lun = 2166 periph->path->device->lun_id; 2167 cdm->matches[j].result.periph_result.unit_number = 2168 periph->unit_number; 2169 strncpy(cdm->matches[j].result.periph_result.periph_name, 2170 periph->periph_name, DEV_IDLEN); 2171 } 2172 2173 return(1); 2174 } 2175 2176 static int 2177 xptedtmatch(struct ccb_dev_match *cdm) 2178 { 2179 int ret; 2180 2181 cdm->num_matches = 0; 2182 2183 /* 2184 * Check the bus list generation. If it has changed, the user 2185 * needs to reset everything and start over. 2186 */ 2187 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2188 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2189 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2190 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2191 return(0); 2192 } 2193 2194 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2195 && (cdm->pos.cookie.bus != NULL)) 2196 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2197 xptedtbusfunc, cdm); 2198 else 2199 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2200 2201 /* 2202 * If we get back 0, that means that we had to stop before fully 2203 * traversing the EDT. It also means that one of the subroutines 2204 * has set the status field to the proper value. If we get back 1, 2205 * we've fully traversed the EDT and copied out any matching entries. 
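 * A consumer of XPT_DEV_MATCH would typically loop on this: resubmit
 * the ccb while cdm->status comes back CAM_DEV_MATCH_MORE (the saved
 * cdm->pos lets the next pass resume where this one stopped), start
 * over with a cleared position on CAM_DEV_MATCH_LIST_CHANGED, and stop
 * once CAM_DEV_MATCH_LAST or CAM_DEV_MATCH_ERROR is returned.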
2206 */ 2207 if (ret == 1) 2208 cdm->status = CAM_DEV_MATCH_LAST; 2209 2210 return(ret); 2211 } 2212 2213 static int 2214 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2215 { 2216 struct ccb_dev_match *cdm; 2217 2218 cdm = (struct ccb_dev_match *)arg; 2219 2220 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2221 && (cdm->pos.cookie.pdrv == pdrv) 2222 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2223 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2224 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2225 (*pdrv)->generation)) { 2226 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2227 return(0); 2228 } 2229 2230 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2231 && (cdm->pos.cookie.pdrv == pdrv) 2232 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2233 && (cdm->pos.cookie.periph != NULL)) 2234 return(xptpdperiphtraverse(pdrv, 2235 (struct cam_periph *)cdm->pos.cookie.periph, 2236 xptplistperiphfunc, arg)); 2237 else 2238 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2239 } 2240 2241 static int 2242 xptplistperiphfunc(struct cam_periph *periph, void *arg) 2243 { 2244 struct ccb_dev_match *cdm; 2245 dev_match_ret retval; 2246 2247 cdm = (struct ccb_dev_match *)arg; 2248 2249 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2250 2251 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2252 cdm->status = CAM_DEV_MATCH_ERROR; 2253 return(0); 2254 } 2255 2256 /* 2257 * If the copy flag is set, copy this peripheral out. 2258 */ 2259 if (retval & DM_RET_COPY) { 2260 int spaceleft, j; 2261 2262 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2263 sizeof(struct dev_match_result)); 2264 2265 /* 2266 * If we don't have enough space to put in another 2267 * match result, save our position and tell the 2268 * user there are more devices to check. 2269 */ 2270 if (spaceleft < sizeof(struct dev_match_result)) { 2271 struct periph_driver **pdrv; 2272 2273 pdrv = NULL; 2274 bzero(&cdm->pos, sizeof(cdm->pos)); 2275 cdm->pos.position_type = 2276 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2277 CAM_DEV_POS_PERIPH; 2278 2279 /* 2280 * This may look a bit non-sensical, but it is 2281 * actually quite logical. There are very few 2282 * peripheral drivers, and bloating every peripheral 2283 * structure with a pointer back to its parent 2284 * peripheral driver linker set entry would cost 2285 * more in the long run than doing this quick lookup. 2286 */ 2287 for (pdrv = 2288 (struct periph_driver **)periphdriver_set.ls_items; 2289 *pdrv != NULL; pdrv++) { 2290 if (strcmp((*pdrv)->driver_name, 2291 periph->periph_name) == 0) 2292 break; 2293 } 2294 2295 if (pdrv == NULL) { 2296 cdm->status = CAM_DEV_MATCH_ERROR; 2297 return(0); 2298 } 2299 2300 cdm->pos.cookie.pdrv = pdrv; 2301 /* 2302 * The periph generation slot does double duty, as 2303 * does the periph pointer slot. They are used for 2304 * both edt and pdrv lookups and positioning. 2305 */ 2306 cdm->pos.cookie.periph = periph; 2307 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2308 (*pdrv)->generation; 2309 cdm->status = CAM_DEV_MATCH_MORE; 2310 return(0); 2311 } 2312 2313 j = cdm->num_matches; 2314 cdm->num_matches++; 2315 cdm->matches[j].type = DEV_MATCH_PERIPH; 2316 cdm->matches[j].result.periph_result.path_id = 2317 periph->path->bus->path_id; 2318 2319 /* 2320 * The transport layer peripheral doesn't have a target or 2321 * lun. 
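 * (It is attached to the wildcard path, as noted in xptperiphmatch
 * above, so report -1 for the missing fields rather than a real ID.)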
2322 */ 2323 if (periph->path->target) 2324 cdm->matches[j].result.periph_result.target_id = 2325 periph->path->target->target_id; 2326 else 2327 cdm->matches[j].result.periph_result.target_id = -1; 2328 2329 if (periph->path->device) 2330 cdm->matches[j].result.periph_result.target_lun = 2331 periph->path->device->lun_id; 2332 else 2333 cdm->matches[j].result.periph_result.target_lun = -1; 2334 2335 cdm->matches[j].result.periph_result.unit_number = 2336 periph->unit_number; 2337 strncpy(cdm->matches[j].result.periph_result.periph_name, 2338 periph->periph_name, DEV_IDLEN); 2339 } 2340 2341 return(1); 2342 } 2343 2344 static int 2345 xptperiphlistmatch(struct ccb_dev_match *cdm) 2346 { 2347 int ret; 2348 2349 cdm->num_matches = 0; 2350 2351 /* 2352 * At this point in the edt traversal function, we check the bus 2353 * list generation to make sure that no busses have been added or 2354 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2355 * For the peripheral driver list traversal function, however, we 2356 * don't have to worry about new peripheral driver types coming or 2357 * going; they're in a linker set, and therefore can't change 2358 * without a recompile. 2359 */ 2360 2361 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2362 && (cdm->pos.cookie.pdrv != NULL)) 2363 ret = xptpdrvtraverse( 2364 (struct periph_driver **)cdm->pos.cookie.pdrv, 2365 xptplistpdrvfunc, cdm); 2366 else 2367 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2368 2369 /* 2370 * If we get back 0, that means that we had to stop before fully 2371 * traversing the peripheral driver tree. It also means that one of 2372 * the subroutines has set the status field to the proper value. If 2373 * we get back 1, we've fully traversed the EDT and copied out any 2374 * matching entries. 2375 */ 2376 if (ret == 1) 2377 cdm->status = CAM_DEV_MATCH_LAST; 2378 2379 return(ret); 2380 } 2381 2382 static int 2383 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2384 { 2385 struct cam_eb *bus, *next_bus; 2386 int retval; 2387 2388 retval = 1; 2389 2390 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2391 bus != NULL; 2392 bus = next_bus) { 2393 next_bus = TAILQ_NEXT(bus, links); 2394 2395 retval = tr_func(bus, arg); 2396 if (retval == 0) 2397 return(retval); 2398 } 2399 2400 return(retval); 2401 } 2402 2403 static int 2404 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2405 xpt_targetfunc_t *tr_func, void *arg) 2406 { 2407 struct cam_et *target, *next_target; 2408 int retval; 2409 2410 retval = 1; 2411 for (target = (start_target ? start_target : 2412 TAILQ_FIRST(&bus->et_entries)); 2413 target != NULL; target = next_target) { 2414 2415 next_target = TAILQ_NEXT(target, links); 2416 2417 retval = tr_func(target, arg); 2418 2419 if (retval == 0) 2420 return(retval); 2421 } 2422 2423 return(retval); 2424 } 2425 2426 static int 2427 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2428 xpt_devicefunc_t *tr_func, void *arg) 2429 { 2430 struct cam_ed *device, *next_device; 2431 int retval; 2432 2433 retval = 1; 2434 for (device = (start_device ? 
start_device : 2435 TAILQ_FIRST(&target->ed_entries)); 2436 device != NULL; 2437 device = next_device) { 2438 2439 next_device = TAILQ_NEXT(device, links); 2440 2441 retval = tr_func(device, arg); 2442 2443 if (retval == 0) 2444 return(retval); 2445 } 2446 2447 return(retval); 2448 } 2449 2450 static int 2451 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2452 xpt_periphfunc_t *tr_func, void *arg) 2453 { 2454 struct cam_periph *periph, *next_periph; 2455 int retval; 2456 2457 retval = 1; 2458 2459 for (periph = (start_periph ? start_periph : 2460 SLIST_FIRST(&device->periphs)); 2461 periph != NULL; 2462 periph = next_periph) { 2463 2464 next_periph = SLIST_NEXT(periph, periph_links); 2465 2466 retval = tr_func(periph, arg); 2467 if (retval == 0) 2468 return(retval); 2469 } 2470 2471 return(retval); 2472 } 2473 2474 static int 2475 xptpdrvtraverse(struct periph_driver **start_pdrv, 2476 xpt_pdrvfunc_t *tr_func, void *arg) 2477 { 2478 struct periph_driver **pdrv; 2479 int retval; 2480 2481 retval = 1; 2482 2483 /* 2484 * We don't traverse the peripheral driver list like we do the 2485 * other lists, because it is a linker set, and therefore cannot be 2486 * changed during runtime. If the peripheral driver list is ever 2487 * re-done to be something other than a linker set (i.e. it can 2488 * change while the system is running), the list traversal should 2489 * be modified to work like the other traversal functions. 2490 */ 2491 for (pdrv = (start_pdrv ? start_pdrv : 2492 (struct periph_driver **)periphdriver_set.ls_items); 2493 *pdrv != NULL; pdrv++) { 2494 retval = tr_func(pdrv, arg); 2495 2496 if (retval == 0) 2497 return(retval); 2498 } 2499 2500 return(retval); 2501 } 2502 2503 static int 2504 xptpdperiphtraverse(struct periph_driver **pdrv, 2505 struct cam_periph *start_periph, 2506 xpt_periphfunc_t *tr_func, void *arg) 2507 { 2508 struct cam_periph *periph, *next_periph; 2509 int retval; 2510 2511 retval = 1; 2512 2513 for (periph = (start_periph ? 
start_periph : 2514 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2515 periph = next_periph) { 2516 2517 next_periph = TAILQ_NEXT(periph, unit_links); 2518 2519 retval = tr_func(periph, arg); 2520 if (retval == 0) 2521 return(retval); 2522 } 2523 return(retval); 2524 } 2525 2526 static int 2527 xptdefbusfunc(struct cam_eb *bus, void *arg) 2528 { 2529 struct xpt_traverse_config *tr_config; 2530 2531 tr_config = (struct xpt_traverse_config *)arg; 2532 2533 if (tr_config->depth == XPT_DEPTH_BUS) { 2534 xpt_busfunc_t *tr_func; 2535 2536 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2537 2538 return(tr_func(bus, tr_config->tr_arg)); 2539 } else 2540 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2541 } 2542 2543 static int 2544 xptdeftargetfunc(struct cam_et *target, void *arg) 2545 { 2546 struct xpt_traverse_config *tr_config; 2547 2548 tr_config = (struct xpt_traverse_config *)arg; 2549 2550 if (tr_config->depth == XPT_DEPTH_TARGET) { 2551 xpt_targetfunc_t *tr_func; 2552 2553 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2554 2555 return(tr_func(target, tr_config->tr_arg)); 2556 } else 2557 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2558 } 2559 2560 static int 2561 xptdefdevicefunc(struct cam_ed *device, void *arg) 2562 { 2563 struct xpt_traverse_config *tr_config; 2564 2565 tr_config = (struct xpt_traverse_config *)arg; 2566 2567 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2568 xpt_devicefunc_t *tr_func; 2569 2570 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2571 2572 return(tr_func(device, tr_config->tr_arg)); 2573 } else 2574 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2575 } 2576 2577 static int 2578 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2579 { 2580 struct xpt_traverse_config *tr_config; 2581 xpt_periphfunc_t *tr_func; 2582 2583 tr_config = (struct xpt_traverse_config *)arg; 2584 2585 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2586 2587 /* 2588 * Unlike the other default functions, we don't check for depth 2589 * here. The peripheral driver level is the last level in the EDT, 2590 * so if we're here, we should execute the function in question. 2591 */ 2592 return(tr_func(periph, tr_config->tr_arg)); 2593 } 2594 2595 /* 2596 * Execute the given function for every bus in the EDT. 2597 */ 2598 static int 2599 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2600 { 2601 struct xpt_traverse_config tr_config; 2602 2603 tr_config.depth = XPT_DEPTH_BUS; 2604 tr_config.tr_func = tr_func; 2605 tr_config.tr_arg = arg; 2606 2607 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2608 } 2609 2610 #ifdef notusedyet 2611 /* 2612 * Execute the given function for every target in the EDT. 2613 */ 2614 static int 2615 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2616 { 2617 struct xpt_traverse_config tr_config; 2618 2619 tr_config.depth = XPT_DEPTH_TARGET; 2620 tr_config.tr_func = tr_func; 2621 tr_config.tr_arg = arg; 2622 2623 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2624 } 2625 #endif /* notusedyet */ 2626 2627 /* 2628 * Execute the given function for every device in the EDT. 
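 * (Used, for example, by the XPT_SASYNC_CB handler below to replay
 * AC_FOUND_DEVICE callbacks for every device that already exists.)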
2629 */ 2630 static int 2631 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2632 { 2633 struct xpt_traverse_config tr_config; 2634 2635 tr_config.depth = XPT_DEPTH_DEVICE; 2636 tr_config.tr_func = tr_func; 2637 tr_config.tr_arg = arg; 2638 2639 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2640 } 2641 2642 #ifdef notusedyet 2643 /* 2644 * Execute the given function for every peripheral in the EDT. 2645 */ 2646 static int 2647 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2648 { 2649 struct xpt_traverse_config tr_config; 2650 2651 tr_config.depth = XPT_DEPTH_PERIPH; 2652 tr_config.tr_func = tr_func; 2653 tr_config.tr_arg = arg; 2654 2655 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2656 } 2657 #endif /* notusedyet */ 2658 2659 static int 2660 xptsetasyncfunc(struct cam_ed *device, void *arg) 2661 { 2662 struct cam_path path; 2663 struct ccb_getdev cgd; 2664 struct async_node *cur_entry; 2665 2666 cur_entry = (struct async_node *)arg; 2667 2668 /* 2669 * Don't report unconfigured devices (Wildcard devs, 2670 * devices only for target mode, device instances 2671 * that have been invalidated but are waiting for 2672 * their last reference count to be released). 2673 */ 2674 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2675 return (1); 2676 2677 xpt_compile_path(&path, 2678 NULL, 2679 device->target->bus->path_id, 2680 device->target->target_id, 2681 device->lun_id); 2682 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2683 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2684 xpt_action((union ccb *)&cgd); 2685 cur_entry->callback(cur_entry->callback_arg, 2686 AC_FOUND_DEVICE, 2687 &path, &cgd); 2688 xpt_release_path(&path); 2689 2690 return(1); 2691 } 2692 2693 static int 2694 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2695 { 2696 struct cam_path path; 2697 struct ccb_pathinq cpi; 2698 struct async_node *cur_entry; 2699 2700 cur_entry = (struct async_node *)arg; 2701 2702 xpt_compile_path(&path, /*periph*/NULL, 2703 bus->sim->path_id, 2704 CAM_TARGET_WILDCARD, 2705 CAM_LUN_WILDCARD); 2706 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2707 cpi.ccb_h.func_code = XPT_PATH_INQ; 2708 xpt_action((union ccb *)&cpi); 2709 cur_entry->callback(cur_entry->callback_arg, 2710 AC_PATH_REGISTERED, 2711 &path, &cpi); 2712 xpt_release_path(&path); 2713 2714 return(1); 2715 } 2716 2717 void 2718 xpt_action(union ccb *start_ccb) 2719 { 2720 int iopl; 2721 2722 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2723 2724 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2725 2726 iopl = splsoftcam(); 2727 switch (start_ccb->ccb_h.func_code) { 2728 case XPT_SCSI_IO: 2729 { 2730 #ifdef CAMDEBUG 2731 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2732 struct cam_path *path; 2733 2734 path = start_ccb->ccb_h.path; 2735 #endif 2736 2737 /* 2738 * For the sake of compatibility with SCSI-1 2739 * devices that may not understand the identify 2740 * message, we include lun information in the 2741 * second byte of all commands. SCSI-1 specifies 2742 * that luns are a 3 bit value and reserves only 3 2743 * bits for lun information in the CDB. Later 2744 * revisions of the SCSI spec allow for more than 8 2745 * luns, but have deprecated lun information in the 2746 * CDB. So, if the lun won't fit, we must omit. 2747 * 2748 * Also be aware that during initial probing for devices, 2749 * the inquiry information is unknown but initialized to 0. 2750 * This means that this code will be exercised while probing 2751 * devices with an ANSI revision greater than 2. 
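 * For illustration: with target_lun 3, the code below ORs (3 << 5),
 * i.e. 0x60, into cdb_bytes[1], so a READ(6) CDB starting
 * { 0x08, 0x00, ... } goes out as { 0x08, 0x60, ... }.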
2752 */ 2753 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2 2754 && start_ccb->ccb_h.target_lun < 8 2755 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2756 2757 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2758 start_ccb->ccb_h.target_lun << 5; 2759 } 2760 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2761 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", 2762 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 2763 &path->device->inq_data), 2764 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 2765 cdb_str, sizeof(cdb_str)))); 2766 /* FALLTHROUGH */ 2767 } 2768 case XPT_TARGET_IO: 2769 case XPT_CONT_TARGET_IO: 2770 start_ccb->csio.sense_resid = 0; 2771 start_ccb->csio.resid = 0; 2772 /* FALLTHROUGH */ 2773 case XPT_ENG_EXEC: 2774 { 2775 struct cam_path *path; 2776 int s; 2777 int runq; 2778 2779 path = start_ccb->ccb_h.path; 2780 s = splsoftcam(); 2781 2782 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2783 if (path->device->qfrozen_cnt == 0) 2784 runq = xpt_schedule_dev_sendq(path->bus, path->device); 2785 else 2786 runq = 0; 2787 splx(s); 2788 if (runq != 0) 2789 xpt_run_dev_sendq(path->bus); 2790 break; 2791 } 2792 case XPT_SET_TRAN_SETTINGS: 2793 { 2794 xpt_set_transfer_settings(&start_ccb->cts, 2795 start_ccb->ccb_h.path->device, 2796 /*async_update*/FALSE); 2797 break; 2798 } 2799 case XPT_CALC_GEOMETRY: 2800 { 2801 struct cam_sim *sim; 2802 2803 /* Filter out garbage */ 2804 if (start_ccb->ccg.block_size == 0 2805 || start_ccb->ccg.volume_size == 0) { 2806 start_ccb->ccg.cylinders = 0; 2807 start_ccb->ccg.heads = 0; 2808 start_ccb->ccg.secs_per_track = 0; 2809 start_ccb->ccb_h.status = CAM_REQ_CMP; 2810 break; 2811 } 2812 #ifdef PC98 2813 /* 2814 * In a PC-98 system, geometry translation depends on 2815 * the "real" device geometry obtained from mode page 4. 2816 * SCSI geometry translation is performed in the 2817 * initialization routine of the SCSI BIOS and the result 2818 * is stored in host memory. If the translation is available 2819 * in host memory, use it. If not, rely on the default 2820 * translation the device driver performs. 2821 */ 2822 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2823 start_ccb->ccb_h.status = CAM_REQ_CMP; 2824 break; 2825 } 2826 #endif 2827 sim = start_ccb->ccb_h.path->bus->sim; 2828 (*(sim->sim_action))(sim, start_ccb); 2829 break; 2830 } 2831 case XPT_ABORT: 2832 { 2833 union ccb* abort_ccb; 2834 int s; 2835 2836 abort_ccb = start_ccb->cab.abort_ccb; 2837 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2838 2839 if (abort_ccb->ccb_h.pinfo.index >= 0) { 2840 struct cam_ccbq *ccbq; 2841 2842 ccbq = &abort_ccb->ccb_h.path->device->ccbq; 2843 cam_ccbq_remove_ccb(ccbq, abort_ccb); 2844 abort_ccb->ccb_h.status = 2845 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2846 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2847 s = splcam(); 2848 xpt_done(abort_ccb); 2849 splx(s); 2850 start_ccb->ccb_h.status = CAM_REQ_CMP; 2851 break; 2852 } 2853 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2854 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2855 /* 2856 * We've caught this ccb en route to 2857 * the SIM. Flag it for abort and the 2858 * SIM will do so just before starting 2859 * real work on the CCB.
2860 */ 2861 abort_ccb->ccb_h.status = 2862 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2863 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2864 start_ccb->ccb_h.status = CAM_REQ_CMP; 2865 break; 2866 } 2867 } 2868 if (XPT_FC_IS_QUEUED(abort_ccb) 2869 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2870 /* 2871 * It's already completed but waiting 2872 * for our SWI to get to it. 2873 */ 2874 start_ccb->ccb_h.status = CAM_UA_ABORT; 2875 break; 2876 } 2877 /* 2878 * If we weren't able to take care of the abort request 2879 * in the XPT, pass the request down to the SIM for processing. 2880 */ 2881 /* FALLTHROUGH */ 2882 } 2883 case XPT_RESET_DEV: 2884 case XPT_ACCEPT_TARGET_IO: 2885 case XPT_EN_LUN: 2886 case XPT_IMMED_NOTIFY: 2887 case XPT_NOTIFY_ACK: 2888 case XPT_GET_TRAN_SETTINGS: 2889 case XPT_RESET_BUS: 2890 { 2891 struct cam_sim *sim; 2892 2893 sim = start_ccb->ccb_h.path->bus->sim; 2894 (*(sim->sim_action))(sim, start_ccb); 2895 break; 2896 } 2897 case XPT_PATH_INQ: 2898 { 2899 struct cam_sim *sim; 2900 2901 sim = start_ccb->ccb_h.path->bus->sim; 2902 (*(sim->sim_action))(sim, start_ccb); 2903 break; 2904 } 2905 case XPT_PATH_STATS: 2906 start_ccb->cpis.last_reset = 2907 start_ccb->ccb_h.path->bus->last_reset; 2908 start_ccb->ccb_h.status = CAM_REQ_CMP; 2909 break; 2910 case XPT_GDEV_TYPE: 2911 { 2912 struct cam_ed *dev; 2913 int s; 2914 2915 dev = start_ccb->ccb_h.path->device; 2916 s = splcam(); 2917 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2918 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2919 } else { 2920 struct ccb_getdev *cgd; 2921 struct cam_eb *bus; 2922 struct cam_et *tar; 2923 2924 cgd = &start_ccb->cgd; 2925 bus = cgd->ccb_h.path->bus; 2926 tar = cgd->ccb_h.path->target; 2927 cgd->inq_data = dev->inq_data; 2928 cgd->pd_type = SID_TYPE(&dev->inq_data); 2929 #ifndef GARBAGE_COLLECT 2930 cgd->dev_openings = dev->ccbq.dev_openings; 2931 cgd->dev_active = dev->ccbq.dev_active; 2932 cgd->devq_openings = dev->ccbq.devq_openings; 2933 cgd->devq_queued = dev->ccbq.queue.entries; 2934 cgd->held = dev->ccbq.held; 2935 cgd->maxtags = dev->quirk->maxtags; 2936 cgd->mintags = dev->quirk->mintags; 2937 #endif 2938 cgd->ccb_h.status = CAM_REQ_CMP; 2939 cgd->serial_num_len = dev->serial_num_len; 2940 if ((dev->serial_num_len > 0) 2941 && (dev->serial_num != NULL)) 2942 bcopy(dev->serial_num, cgd->serial_num, 2943 dev->serial_num_len); 2944 } 2945 splx(s); 2946 break; 2947 } 2948 case XPT_GDEV_STATS: 2949 { 2950 struct cam_ed *dev; 2951 int s; 2952 2953 dev = start_ccb->ccb_h.path->device; 2954 s = splcam(); 2955 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2956 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2957 } else { 2958 struct ccb_getdevstats *cgds; 2959 struct cam_eb *bus; 2960 struct cam_et *tar; 2961 2962 cgds = &start_ccb->cgds; 2963 bus = cgds->ccb_h.path->bus; 2964 tar = cgds->ccb_h.path->target; 2965 cgds->dev_openings = dev->ccbq.dev_openings; 2966 cgds->dev_active = dev->ccbq.dev_active; 2967 cgds->devq_openings = dev->ccbq.devq_openings; 2968 cgds->devq_queued = dev->ccbq.queue.entries; 2969 cgds->held = dev->ccbq.held; 2970 cgds->last_reset = tar->last_reset; 2971 cgds->maxtags = dev->quirk->maxtags; 2972 cgds->mintags = dev->quirk->mintags; 2973 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2974 cgds->last_reset = bus->last_reset; 2975 cgds->ccb_h.status = CAM_REQ_CMP; 2976 } 2977 splx(s); 2978 break; 2979 } 2980 case XPT_GDEVLIST: 2981 { 2982 struct cam_periph *nperiph; 2983 struct periph_list *periph_head; 2984 struct ccb_getdevlist *cgdl; 2985 int i; 2986 int 
s; 2987 struct cam_ed *device; 2988 int found; 2989 2990 2991 found = 0; 2992 2993 /* 2994 * Don't want anyone mucking with our data. 2995 */ 2996 s = splcam(); 2997 device = start_ccb->ccb_h.path->device; 2998 periph_head = &device->periphs; 2999 cgdl = &start_ccb->cgdl; 3000 3001 /* 3002 * Check and see if the list has changed since the user 3003 * last requested a list member. If so, tell them that the 3004 * list has changed, and therefore they need to start over 3005 * from the beginning. 3006 */ 3007 if ((cgdl->index != 0) && 3008 (cgdl->generation != device->generation)) { 3009 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 3010 splx(s); 3011 break; 3012 } 3013 3014 /* 3015 * Traverse the list of peripherals and attempt to find 3016 * the requested peripheral. 3017 */ 3018 for (nperiph = periph_head->slh_first, i = 0; 3019 (nperiph != NULL) && (i <= cgdl->index); 3020 nperiph = nperiph->periph_links.sle_next, i++) { 3021 if (i == cgdl->index) { 3022 strncpy(cgdl->periph_name, 3023 nperiph->periph_name, 3024 DEV_IDLEN); 3025 cgdl->unit_number = nperiph->unit_number; 3026 found = 1; 3027 } 3028 } 3029 if (found == 0) { 3030 cgdl->status = CAM_GDEVLIST_ERROR; 3031 splx(s); 3032 break; 3033 } 3034 3035 if (nperiph == NULL) 3036 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 3037 else 3038 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 3039 3040 cgdl->index++; 3041 cgdl->generation = device->generation; 3042 3043 splx(s); 3044 cgdl->ccb_h.status = CAM_REQ_CMP; 3045 break; 3046 } 3047 case XPT_DEV_MATCH: 3048 { 3049 int s; 3050 dev_pos_type position_type; 3051 struct ccb_dev_match *cdm; 3052 int ret; 3053 3054 cdm = &start_ccb->cdm; 3055 3056 /* 3057 * Prevent EDT changes while we traverse it. 3058 */ 3059 s = splcam(); 3060 /* 3061 * There are two ways of getting at information in the EDT. 3062 * The first way is via the primary EDT tree. It starts 3063 * with a list of busses, then a list of targets on a bus, 3064 * then devices/luns on a target, and then peripherals on a 3065 * device/lun. The "other" way is by the peripheral driver 3066 * lists. The peripheral driver lists are organized by 3067 * peripheral driver. (obviously) So it makes sense to 3068 * use the peripheral driver list if the user is looking 3069 * for something like "da1", or all "da" devices. If the 3070 * user is looking for something on a particular bus/target 3071 * or lun, it's generally better to go through the EDT tree. 
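 * For example (hypothetical patterns): a pattern that only names
 * periph_name "da" is best served by the peripheral driver lists,
 * while a pattern that pins down path_id 0 and target_id 3 is best
 * served by walking the EDT.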
3072 */ 3073 3074 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 3075 position_type = cdm->pos.position_type; 3076 else { 3077 int i; 3078 3079 position_type = CAM_DEV_POS_NONE; 3080 3081 for (i = 0; i < cdm->num_patterns; i++) { 3082 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 3083 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 3084 position_type = CAM_DEV_POS_EDT; 3085 break; 3086 } 3087 } 3088 3089 if (cdm->num_patterns == 0) 3090 position_type = CAM_DEV_POS_EDT; 3091 else if (position_type == CAM_DEV_POS_NONE) 3092 position_type = CAM_DEV_POS_PDRV; 3093 } 3094 3095 switch(position_type & CAM_DEV_POS_TYPEMASK) { 3096 case CAM_DEV_POS_EDT: 3097 ret = xptedtmatch(cdm); 3098 break; 3099 case CAM_DEV_POS_PDRV: 3100 ret = xptperiphlistmatch(cdm); 3101 break; 3102 default: 3103 cdm->status = CAM_DEV_MATCH_ERROR; 3104 break; 3105 } 3106 3107 splx(s); 3108 3109 if (cdm->status == CAM_DEV_MATCH_ERROR) 3110 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 3111 else 3112 start_ccb->ccb_h.status = CAM_REQ_CMP; 3113 3114 break; 3115 } 3116 case XPT_SASYNC_CB: 3117 { 3118 struct ccb_setasync *csa; 3119 struct async_node *cur_entry; 3120 struct async_list *async_head; 3121 u_int32_t added; 3122 int s; 3123 3124 csa = &start_ccb->csa; 3125 added = csa->event_enable; 3126 async_head = &csa->ccb_h.path->device->asyncs; 3127 3128 /* 3129 * If there is already an entry for us, simply 3130 * update it. 3131 */ 3132 s = splcam(); 3133 cur_entry = SLIST_FIRST(async_head); 3134 while (cur_entry != NULL) { 3135 if ((cur_entry->callback_arg == csa->callback_arg) 3136 && (cur_entry->callback == csa->callback)) 3137 break; 3138 cur_entry = SLIST_NEXT(cur_entry, links); 3139 } 3140 3141 if (cur_entry != NULL) { 3142 /* 3143 * If the request has no flags set, 3144 * remove the entry. 3145 */ 3146 added &= ~cur_entry->event_enable; 3147 if (csa->event_enable == 0) { 3148 SLIST_REMOVE(async_head, cur_entry, 3149 async_node, links); 3150 csa->ccb_h.path->device->refcount--; 3151 free(cur_entry, M_DEVBUF); 3152 } else { 3153 cur_entry->event_enable = csa->event_enable; 3154 } 3155 } else { 3156 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 3157 M_NOWAIT); 3158 if (cur_entry == NULL) { 3159 splx(s); 3160 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 3161 break; 3162 } 3163 cur_entry->callback_arg = csa->callback_arg; 3164 cur_entry->callback = csa->callback; 3165 cur_entry->event_enable = csa->event_enable; 3166 SLIST_INSERT_HEAD(async_head, cur_entry, links); 3167 csa->ccb_h.path->device->refcount++; 3168 } 3169 3170 if ((added & AC_FOUND_DEVICE) != 0) { 3171 /* 3172 * Get this peripheral up to date with all 3173 * the currently existing devices. 3174 */ 3175 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 3176 } 3177 if ((added & AC_PATH_REGISTERED) != 0) { 3178 /* 3179 * Get this peripheral up to date with all 3180 * the currently existing busses. 
3181 */ 3182 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 3183 } 3184 splx(s); 3185 start_ccb->ccb_h.status = CAM_REQ_CMP; 3186 break; 3187 } 3188 case XPT_REL_SIMQ: 3189 { 3190 struct ccb_relsim *crs; 3191 struct cam_ed *dev; 3192 int s; 3193 3194 crs = &start_ccb->crs; 3195 dev = crs->ccb_h.path->device; 3196 if (dev == NULL) { 3197 3198 crs->ccb_h.status = CAM_DEV_NOT_THERE; 3199 break; 3200 } 3201 3202 s = splcam(); 3203 3204 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 3205 3206 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 3207 3208 /* Don't ever go below one opening */ 3209 if (crs->openings > 0) { 3210 xpt_dev_ccbq_resize(crs->ccb_h.path, 3211 crs->openings); 3212 3213 if (bootverbose) { 3214 xpt_print_path(crs->ccb_h.path); 3215 printf("tagged openings " 3216 "now %d\n", 3217 crs->openings); 3218 } 3219 } 3220 } 3221 } 3222 3223 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 3224 3225 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 3226 3227 /* 3228 * Just extend the old timeout and decrement 3229 * the freeze count so that a single timeout 3230 * is sufficient for releasing the queue. 3231 */ 3232 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3233 untimeout(xpt_release_devq_timeout, 3234 dev, dev->c_handle); 3235 } else { 3236 3237 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3238 } 3239 3240 dev->c_handle = 3241 timeout(xpt_release_devq_timeout, 3242 dev, 3243 (crs->release_timeout * hz) / 1000); 3244 3245 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 3246 3247 } 3248 3249 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 3250 3251 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 3252 /* 3253 * Decrement the freeze count so that a single 3254 * completion is still sufficient to unfreeze 3255 * the queue. 
3256 */ 3257 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3258 } else { 3259 3260 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 3261 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3262 } 3263 } 3264 3265 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 3266 3267 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3268 || (dev->ccbq.dev_active == 0)) { 3269 3270 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3271 } else { 3272 3273 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3274 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3275 } 3276 } 3277 splx(s); 3278 3279 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3280 3281 xpt_release_devq(crs->ccb_h.path, /*count*/1, 3282 /*run_queue*/TRUE); 3283 } 3284 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3285 start_ccb->ccb_h.status = CAM_REQ_CMP; 3286 break; 3287 } 3288 case XPT_SCAN_BUS: 3289 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3290 break; 3291 case XPT_SCAN_LUN: 3292 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3293 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3294 start_ccb); 3295 break; 3296 case XPT_DEBUG: { 3297 #ifdef CAMDEBUG 3298 int s; 3299 3300 s = splcam(); 3301 #ifdef CAM_DEBUG_DELAY 3302 cam_debug_delay = CAM_DEBUG_DELAY; 3303 #endif 3304 cam_dflags = start_ccb->cdbg.flags; 3305 if (cam_dpath != NULL) { 3306 xpt_free_path(cam_dpath); 3307 cam_dpath = NULL; 3308 } 3309 3310 if (cam_dflags != CAM_DEBUG_NONE) { 3311 if (xpt_create_path(&cam_dpath, xpt_periph, 3312 start_ccb->ccb_h.path_id, 3313 start_ccb->ccb_h.target_id, 3314 start_ccb->ccb_h.target_lun) != 3315 CAM_REQ_CMP) { 3316 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3317 cam_dflags = CAM_DEBUG_NONE; 3318 } else { 3319 start_ccb->ccb_h.status = CAM_REQ_CMP; 3320 xpt_print_path(cam_dpath); 3321 printf("debugging flags now %x\n", cam_dflags); 3322 } 3323 } else { 3324 cam_dpath = NULL; 3325 start_ccb->ccb_h.status = CAM_REQ_CMP; 3326 } 3327 splx(s); 3328 #else /* !CAMDEBUG */ 3329 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3330 #endif /* CAMDEBUG */ 3331 break; 3332 } 3333 case XPT_NOOP: 3334 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3335 xpt_freeze_devq(start_ccb->ccb_h.path, 1); 3336 start_ccb->ccb_h.status = CAM_REQ_CMP; 3337 break; 3338 default: 3339 case XPT_SDEV_TYPE: 3340 case XPT_TERM_IO: 3341 case XPT_ENG_INQ: 3342 /* XXX Implement */ 3343 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3344 break; 3345 } 3346 splx(iopl); 3347 } 3348 3349 void 3350 xpt_polled_action(union ccb *start_ccb) 3351 { 3352 int s; 3353 u_int32_t timeout; 3354 struct cam_sim *sim; 3355 struct cam_devq *devq; 3356 struct cam_ed *dev; 3357 3358 timeout = start_ccb->ccb_h.timeout; 3359 sim = start_ccb->ccb_h.path->bus->sim; 3360 devq = sim->devq; 3361 dev = start_ccb->ccb_h.path->device; 3362 3363 s = splcam(); 3364 3365 /* 3366 * Steal an opening so that no other queued requests 3367 * can get it before us while we simulate interrupts. 
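 * (The opening is handed back immediately after the polling loop
 * below; while we wait for an opening we call the SIM's poll routine
 * roughly once per millisecond of the CCB's timeout.)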
3368 */ 3369 dev->ccbq.devq_openings--; 3370 dev->ccbq.dev_openings--; 3371 3372 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) 3373 && (--timeout > 0)) { 3374 DELAY(1000); 3375 (*(sim->sim_poll))(sim); 3376 swi_camnet(); 3377 swi_cambio(); 3378 } 3379 3380 dev->ccbq.devq_openings++; 3381 dev->ccbq.dev_openings++; 3382 3383 if (timeout != 0) { 3384 xpt_action(start_ccb); 3385 while(--timeout > 0) { 3386 (*(sim->sim_poll))(sim); 3387 swi_camnet(); 3388 swi_cambio(); 3389 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3390 != CAM_REQ_INPROG) 3391 break; 3392 DELAY(1000); 3393 } 3394 if (timeout == 0) { 3395 /* 3396 * XXX Is it worth adding a sim_timeout entry 3397 * point so we can attempt recovery? If 3398 * this is only used for dumps, I don't think 3399 * it is. 3400 */ 3401 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3402 } 3403 } else { 3404 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3405 } 3406 splx(s); 3407 } 3408 3409 /* 3410 * Schedule a peripheral driver to receive a ccb when its 3411 * target device has space for more transactions. 3412 */ 3413 void 3414 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) 3415 { 3416 struct cam_ed *device; 3417 int s; 3418 int runq; 3419 3420 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3421 device = perph->path->device; 3422 s = splsoftcam(); 3423 if (periph_is_queued(perph)) { 3424 /* Simply reorder based on new priority */ 3425 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3426 (" change priority to %d\n", new_priority)); 3427 if (new_priority < perph->pinfo.priority) { 3428 camq_change_priority(&device->drvq, 3429 perph->pinfo.index, 3430 new_priority); 3431 } 3432 runq = 0; 3433 } else { 3434 /* New entry on the queue */ 3435 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3436 (" added periph to queue\n")); 3437 perph->pinfo.priority = new_priority; 3438 perph->pinfo.generation = ++device->drvq.generation; 3439 camq_insert(&device->drvq, &perph->pinfo); 3440 runq = xpt_schedule_dev_allocq(perph->path->bus, device); 3441 } 3442 splx(s); 3443 if (runq != 0) { 3444 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3445 (" calling xpt_run_devq\n")); 3446 xpt_run_dev_allocq(perph->path->bus); 3447 } 3448 } 3449 3450 3451 /* 3452 * Schedule a device to run on a given queue. 3453 * If the device was inserted as a new entry on the queue, 3454 * return 1, meaning the device queue should be run. If we 3455 * were already queued, implying someone else has already 3456 * started the queue, return 0 so the caller doesn't attempt 3457 * to run the queue. Must be run at splsoftcam 3458 * (or splcam, since that encompasses splsoftcam). 3459 */ 3460 static int 3461 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3462 u_int32_t new_priority) 3463 { 3464 int retval; 3465 u_int32_t old_priority; 3466 3467 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3468 3469 old_priority = pinfo->priority; 3470 3471 /* 3472 * Are we already queued?
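 * (pinfo->index holds CAM_UNQUEUED_INDEX whenever the entry is not on
 * a queue, so that is the test used below.)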
3473 */ 3474 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3475 /* Simply reorder based on new priority */ 3476 if (new_priority < old_priority) { 3477 camq_change_priority(queue, pinfo->index, 3478 new_priority); 3479 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3480 ("changed priority to %d\n", 3481 new_priority)); 3482 } 3483 retval = 0; 3484 } else { 3485 /* New entry on the queue */ 3486 if (new_priority < old_priority) 3487 pinfo->priority = new_priority; 3488 3489 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3490 ("Inserting onto queue\n")); 3491 pinfo->generation = ++queue->generation; 3492 camq_insert(queue, pinfo); 3493 retval = 1; 3494 } 3495 return (retval); 3496 } 3497 3498 static void 3499 xpt_run_dev_allocq(struct cam_eb *bus) 3500 { 3501 struct cam_devq *devq; 3502 int s; 3503 3504 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3505 devq = bus->sim->devq; 3506 3507 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3508 (" qfrozen_cnt == 0x%x, entries == %d, " 3509 "openings == %d, active == %d\n", 3510 devq->alloc_queue.qfrozen_cnt, 3511 devq->alloc_queue.entries, 3512 devq->alloc_openings, 3513 devq->alloc_active)); 3514 3515 s = splsoftcam(); 3516 devq->alloc_queue.qfrozen_cnt++; 3517 while ((devq->alloc_queue.entries > 0) 3518 && (devq->alloc_openings > 0) 3519 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3520 struct cam_ed_qinfo *qinfo; 3521 struct cam_ed *device; 3522 union ccb *work_ccb; 3523 struct cam_periph *drv; 3524 struct camq *drvq; 3525 3526 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3527 CAMQ_HEAD); 3528 device = qinfo->device; 3529 3530 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3531 ("running device %p\n", device)); 3532 3533 drvq = &device->drvq; 3534 3535 #ifdef CAMDEBUG 3536 if (drvq->entries <= 0) { 3537 panic("xpt_run_dev_allocq: " 3538 "Device on queue without any work to do"); 3539 } 3540 #endif 3541 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3542 devq->alloc_openings--; 3543 devq->alloc_active++; 3544 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 3545 splx(s); 3546 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3547 drv->pinfo.priority); 3548 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3549 ("calling periph start\n")); 3550 drv->periph_start(drv, work_ccb); 3551 } else { 3552 /* 3553 * Malloc failure in alloc_ccb 3554 */ 3555 /* 3556 * XXX add us to a list to be run from free_ccb 3557 * if we don't have any ccbs active on this 3558 * device queue otherwise we may never get run 3559 * again. 3560 */ 3561 break; 3562 } 3563 3564 /* Raise IPL for possible insertion and test at top of loop */ 3565 s = splsoftcam(); 3566 3567 if (drvq->entries > 0) { 3568 /* We have more work. 
Attempt to reschedule */ 3569 xpt_schedule_dev_allocq(bus, device); 3570 } 3571 } 3572 devq->alloc_queue.qfrozen_cnt--; 3573 splx(s); 3574 } 3575 3576 static void 3577 xpt_run_dev_sendq(struct cam_eb *bus) 3578 { 3579 struct cam_devq *devq; 3580 int s; 3581 3582 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3583 3584 devq = bus->sim->devq; 3585 3586 s = splcam(); 3587 devq->send_queue.qfrozen_cnt++; 3588 splx(s); 3589 s = splsoftcam(); 3590 while ((devq->send_queue.entries > 0) 3591 && (devq->send_openings > 0)) { 3592 struct cam_ed_qinfo *qinfo; 3593 struct cam_ed *device; 3594 union ccb *work_ccb; 3595 struct cam_sim *sim; 3596 int ospl; 3597 3598 ospl = splcam(); 3599 if (devq->send_queue.qfrozen_cnt > 1) { 3600 splx(ospl); 3601 break; 3602 } 3603 3604 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3605 CAMQ_HEAD); 3606 device = qinfo->device; 3607 3608 /* 3609 * If the device has been "frozen", don't attempt 3610 * to run it. 3611 */ 3612 if (device->qfrozen_cnt > 0) { 3613 splx(ospl); 3614 continue; 3615 } 3616 3617 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3618 ("running device %p\n", device)); 3619 3620 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3621 if (work_ccb == NULL) { 3622 printf("device on run queue with no ccbs???"); 3623 splx(ospl); 3624 continue; 3625 } 3626 3627 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3628 3629 if (num_highpower <= 0) { 3630 /* 3631 * We got a high power command, but we 3632 * don't have any available slots. Freeze 3633 * the device queue until we have a slot 3634 * available. 3635 */ 3636 device->qfrozen_cnt++; 3637 STAILQ_INSERT_TAIL(&highpowerq, 3638 &work_ccb->ccb_h, 3639 xpt_links.stqe); 3640 3641 splx(ospl); 3642 continue; 3643 } else { 3644 /* 3645 * Consume a high power slot while 3646 * this ccb runs. 3647 */ 3648 num_highpower--; 3649 } 3650 } 3651 devq->active_dev = device; 3652 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3653 3654 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3655 splx(ospl); 3656 3657 devq->send_openings--; 3658 devq->send_active++; 3659 3660 if (device->ccbq.queue.entries > 0) 3661 xpt_schedule_dev_sendq(bus, device); 3662 3663 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3664 /* 3665 * The client wants to freeze the queue 3666 * after this CCB is sent. 3667 */ 3668 ospl = splcam(); 3669 device->qfrozen_cnt++; 3670 splx(ospl); 3671 } 3672 3673 splx(s); 3674 3675 if ((device->inq_flags & SID_CmdQue) != 0) 3676 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3677 else 3678 /* 3679 * Clear this in case of a retried CCB that failed 3680 * due to a rejected tag. 3681 */ 3682 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3683 3684 /* 3685 * Device queues can be shared among multiple sim instances 3686 * that reside on different busses. Use the SIM in the queue 3687 * CCB's path, rather than the one in the bus that was passed 3688 * into this function. 3689 */ 3690 sim = work_ccb->ccb_h.path->bus->sim; 3691 (*(sim->sim_action))(sim, work_ccb); 3692 3693 ospl = splcam(); 3694 devq->active_dev = NULL; 3695 splx(ospl); 3696 /* Raise IPL for possible insertion and test at top of loop */ 3697 s = splsoftcam(); 3698 } 3699 splx(s); 3700 s = splcam(); 3701 devq->send_queue.qfrozen_cnt--; 3702 splx(s); 3703 } 3704 3705 /* 3706 * This function merges stuff from the slave ccb into the master ccb, while 3707 * keeping important fields in the master ccb constant. 
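 * Concretely: only the header fields a peripheral driver is allowed to
 * set (retry_count, func_code, timeout and flags) are copied
 * individually, then everything after the ccb_hdr is copied wholesale,
 * so the master's path, status and queueing state stay intact.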
3708 */ 3709 void 3710 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3711 { 3712 /* 3713 * Pull fields that are valid for peripheral drivers to set 3714 * into the master CCB along with the CCB "payload". 3715 */ 3716 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3717 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3718 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3719 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3720 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3721 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3722 } 3723 3724 void 3725 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3726 { 3727 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3728 ccb_h->pinfo.priority = priority; 3729 ccb_h->path = path; 3730 ccb_h->path_id = path->bus->path_id; 3731 if (path->target) 3732 ccb_h->target_id = path->target->target_id; 3733 else 3734 ccb_h->target_id = CAM_TARGET_WILDCARD; 3735 if (path->device) { 3736 ccb_h->target_lun = path->device->lun_id; 3737 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3738 } else { 3739 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3740 } 3741 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3742 ccb_h->flags = 0; 3743 } 3744 3745 /* Path manipulation functions */ 3746 cam_status 3747 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3748 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3749 { 3750 struct cam_path *path; 3751 cam_status status; 3752 3753 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT); 3754 3755 if (path == NULL) { 3756 status = CAM_RESRC_UNAVAIL; 3757 return(status); 3758 } 3759 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3760 if (status != CAM_REQ_CMP) { 3761 free(path, M_DEVBUF); 3762 path = NULL; 3763 } 3764 *new_path_ptr = path; 3765 return (status); 3766 } 3767 3768 static cam_status 3769 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3770 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3771 { 3772 struct cam_eb *bus; 3773 struct cam_et *target; 3774 struct cam_ed *device; 3775 cam_status status; 3776 int s; 3777 3778 status = CAM_REQ_CMP; /* Completed without error */ 3779 target = NULL; /* Wildcarded */ 3780 device = NULL; /* Wildcarded */ 3781 3782 /* 3783 * We will potentially modify the EDT, so block interrupts 3784 * that may attempt to create cam paths. 3785 */ 3786 s = splcam(); 3787 bus = xpt_find_bus(path_id); 3788 if (bus == NULL) { 3789 status = CAM_PATH_INVALID; 3790 } else { 3791 target = xpt_find_target(bus, target_id); 3792 if (target == NULL) { 3793 /* Create one */ 3794 struct cam_et *new_target; 3795 3796 new_target = xpt_alloc_target(bus, target_id); 3797 if (new_target == NULL) { 3798 status = CAM_RESRC_UNAVAIL; 3799 } else { 3800 target = new_target; 3801 } 3802 } 3803 if (target != NULL) { 3804 device = xpt_find_device(target, lun_id); 3805 if (device == NULL) { 3806 /* Create one */ 3807 struct cam_ed *new_device; 3808 3809 new_device = xpt_alloc_device(bus, 3810 target, 3811 lun_id); 3812 if (new_device == NULL) { 3813 status = CAM_RESRC_UNAVAIL; 3814 } else { 3815 device = new_device; 3816 } 3817 } 3818 } 3819 } 3820 splx(s); 3821 3822 /* 3823 * Only touch the user's data if we are successful. 
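 * On failure, drop whatever references the xpt_find_*/xpt_alloc_*
 * calls above took, so a half-built path doesn't leak device, target
 * or bus refcounts.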
3824 */ 3825 if (status == CAM_REQ_CMP) { 3826 new_path->periph = perph; 3827 new_path->bus = bus; 3828 new_path->target = target; 3829 new_path->device = device; 3830 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3831 } else { 3832 if (device != NULL) 3833 xpt_release_device(bus, target, device); 3834 if (target != NULL) 3835 xpt_release_target(bus, target); 3836 if (bus != NULL) 3837 xpt_release_bus(bus); 3838 } 3839 return (status); 3840 } 3841 3842 static void 3843 xpt_release_path(struct cam_path *path) 3844 { 3845 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3846 if (path->device != NULL) { 3847 xpt_release_device(path->bus, path->target, path->device); 3848 path->device = NULL; 3849 } 3850 if (path->target != NULL) { 3851 xpt_release_target(path->bus, path->target); 3852 path->target = NULL; 3853 } 3854 if (path->bus != NULL) { 3855 xpt_release_bus(path->bus); 3856 path->bus = NULL; 3857 } 3858 } 3859 3860 void 3861 xpt_free_path(struct cam_path *path) 3862 { 3863 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3864 xpt_release_path(path); 3865 free(path, M_DEVBUF); 3866 } 3867 3868 3869 /* 3870 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3871 * in path1, 2 for match with wildcards in path2. 3872 */ 3873 int 3874 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3875 { 3876 int retval = 0; 3877 3878 if (path1->bus != path2->bus) { 3879 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3880 retval = 1; 3881 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3882 retval = 2; 3883 else 3884 return (-1); 3885 } 3886 if (path1->target != path2->target) { 3887 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3888 if (retval == 0) 3889 retval = 1; 3890 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3891 retval = 2; 3892 else 3893 return (-1); 3894 } 3895 if (path1->device != path2->device) { 3896 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3897 if (retval == 0) 3898 retval = 1; 3899 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3900 retval = 2; 3901 else 3902 return (-1); 3903 } 3904 return (retval); 3905 } 3906 3907 void 3908 xpt_print_path(struct cam_path *path) 3909 { 3910 if (path == NULL) 3911 printf("(nopath): "); 3912 else { 3913 if (path->periph != NULL) 3914 printf("(%s%d:", path->periph->periph_name, 3915 path->periph->unit_number); 3916 else 3917 printf("(noperiph:"); 3918 3919 if (path->bus != NULL) 3920 printf("%s%d:%d:", path->bus->sim->sim_name, 3921 path->bus->sim->unit_number, 3922 path->bus->sim->bus_id); 3923 else 3924 printf("nobus:"); 3925 3926 if (path->target != NULL) 3927 printf("%d:", path->target->target_id); 3928 else 3929 printf("X:"); 3930 3931 if (path->device != NULL) 3932 printf("%d): ", path->device->lun_id); 3933 else 3934 printf("X): "); 3935 } 3936 } 3937 3938 path_id_t 3939 xpt_path_path_id(struct cam_path *path) 3940 { 3941 return(path->bus->path_id); 3942 } 3943 3944 target_id_t 3945 xpt_path_target_id(struct cam_path *path) 3946 { 3947 if (path->target != NULL) 3948 return (path->target->target_id); 3949 else 3950 return (CAM_TARGET_WILDCARD); 3951 } 3952 3953 lun_id_t 3954 xpt_path_lun_id(struct cam_path *path) 3955 { 3956 if (path->device != NULL) 3957 return (path->device->lun_id); 3958 else 3959 return (CAM_LUN_WILDCARD); 3960 } 3961 3962 struct cam_sim * 3963 xpt_path_sim(struct cam_path *path) 3964 { 3965 return (path->bus->sim); 3966 } 3967 3968 struct cam_periph* 3969 xpt_path_periph(struct cam_path *path) 3970 { 3971 return (path->periph); 
3972 } 3973 3974 /* 3975 * Release a CAM control block for the caller. Remit the cost of the structure 3976 * to the device referenced by the path. If this device had no 'credits' 3977 * and peripheral drivers have registered async callbacks for this notification, 3978 * call them now. 3979 */ 3980 void 3981 xpt_release_ccb(union ccb *free_ccb) 3982 { 3983 int s; 3984 struct cam_path *path; 3985 struct cam_ed *device; 3986 struct cam_eb *bus; 3987 3988 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3989 path = free_ccb->ccb_h.path; 3990 device = path->device; 3991 bus = path->bus; 3992 s = splsoftcam(); 3993 cam_ccbq_release_opening(&device->ccbq); 3994 if (xpt_ccb_count > xpt_max_ccbs) { 3995 xpt_free_ccb(free_ccb); 3996 xpt_ccb_count--; 3997 } else { 3998 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle); 3999 } 4000 bus->sim->devq->alloc_openings++; 4001 bus->sim->devq->alloc_active--; 4002 /* XXX Turn this into an inline function - xpt_run_device?? */ 4003 if ((device_is_alloc_queued(device) == 0) 4004 && (device->drvq.entries > 0)) { 4005 xpt_schedule_dev_allocq(bus, device); 4006 } 4007 splx(s); 4008 if (dev_allocq_is_runnable(bus->sim->devq)) 4009 xpt_run_dev_allocq(bus); 4010 } 4011 4012 /* Functions accessed by SIM drivers */ 4013 4014 /* 4015 * A sim structure, listing the SIM entry points and instance 4016 * identification info, is passed to xpt_bus_register to hook the SIM 4017 * into the CAM framework. xpt_bus_register creates a cam_eb entry 4018 * for this new bus and places it in the array of busses and assigns 4019 * it a path_id. The path_id may be influenced by "hard wiring" 4020 * information specified by the user. Once interrupt services are 4021 * available, the bus will be probed. 4022 */ 4023 int32_t 4024 xpt_bus_register(struct cam_sim *sim, u_int32_t bus) 4025 { 4026 static path_id_t buscount; 4027 struct cam_eb *new_bus; 4028 struct ccb_pathinq cpi; 4029 int s; 4030 4031 sim->bus_id = bus; 4032 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 4033 M_DEVBUF, M_NOWAIT); 4034 if (new_bus == NULL) { 4035 /* Couldn't satisfy request */ 4036 return (CAM_RESRC_UNAVAIL); 4037 } 4038 4039 bzero(new_bus, sizeof(*new_bus)); 4040 4041 if (strcmp(sim->sim_name, "xpt") != 0) { 4042 4043 sim->path_id = xptpathid(sim->sim_name, sim->unit_number, 4044 sim->bus_id, &buscount); 4045 } 4046 4047 new_bus->path_id = sim->path_id; 4048 new_bus->sim = sim; 4049 TAILQ_INIT(&new_bus->et_entries); 4050 timevalclear(&new_bus->last_reset); 4051 new_bus->refcount = 1; /* Held until a bus_deregister event */ 4052 s = splcam(); 4053 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links); 4054 bus_generation++; 4055 splx(s); 4056 4057 /* Notify interested parties */ 4058 if (sim->path_id != CAM_XPT_PATH_ID) { 4059 struct cam_path path; 4060 4061 xpt_compile_path(&path, /*periph*/NULL, sim->path_id, 4062 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 4063 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 4064 cpi.ccb_h.func_code = XPT_PATH_INQ; 4065 xpt_action((union ccb *)&cpi); 4066 xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi); 4067 xpt_release_path(&path); 4068 } 4069 return (CAM_SUCCESS); 4070 } 4071 4072 static int 4073 xptnextfreebus(path_id_t startbus) 4074 { 4075 struct cam_sim_config *sim_conf; 4076 4077 sim_conf = cam_sinit; 4078 while (sim_conf->sim_name != NULL) { 4079 4080 if (IS_SPECIFIED(sim_conf->pathid) 4081 && (startbus == sim_conf->pathid)) { 4082 ++startbus; 4083 /* Start the search over */ 4084 sim_conf = cam_sinit; 4085 } else { 4086 sim_conf++; 4087 } 4088
} 4089 return (startbus); 4090 } 4091 4092 static int 4093 xptpathid(const char *sim_name, int sim_unit, 4094 int sim_bus, path_id_t *nextpath) 4095 { 4096 struct cam_sim_config *sim_conf; 4097 path_id_t pathid; 4098 4099 pathid = CAM_XPT_PATH_ID; 4100 for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) { 4101 4102 if (!IS_SPECIFIED(sim_conf->pathid)) 4103 continue; 4104 4105 if (!strcmp(sim_name, sim_conf->sim_name) 4106 && (sim_unit == sim_conf->sim_unit)) { 4107 4108 if (IS_SPECIFIED(sim_conf->sim_bus)) { 4109 if (sim_bus == sim_conf->sim_bus) { 4110 pathid = sim_conf->pathid; 4111 break; 4112 } 4113 } else if (sim_bus == 0) { 4114 /* Unspecified matches bus 0 */ 4115 pathid = sim_conf->pathid; 4116 break; 4117 } else { 4118 printf("Ambiguous scbus configuration for %s%d " 4119 "bus %d, cannot wire down. The kernel " 4120 "config entry for scbus%d should " 4121 "specify a controller bus.\n" 4122 "Scbus will be assigned dynamically.\n", 4123 sim_name, sim_unit, sim_bus, 4124 sim_conf->pathid); 4125 break; 4126 } 4127 } 4128 } 4129 4130 if (pathid == CAM_XPT_PATH_ID) { 4131 pathid = xptnextfreebus(*nextpath); 4132 *nextpath = pathid + 1; 4133 } 4134 return (pathid); 4135 } 4136 4137 int32_t 4138 xpt_bus_deregister(path_id) 4139 u_int8_t path_id; 4140 { 4141 /* XXX */ 4142 return (CAM_SUCCESS); 4143 } 4144 4145 void 4146 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4147 { 4148 struct cam_eb *bus; 4149 struct cam_et *target, *next_target; 4150 struct cam_ed *device, *next_device; 4151 int s; 4152 4153 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 4154 4155 /* 4156 * Most async events come from a CAM interrupt context. In 4157 * a few cases, the error recovery code at the peripheral layer, 4158 * which may run from our SWI or a process context, may signal 4159 * deferred events with a call to xpt_async. Ensure async 4160 * notifications are serialized by blocking cam interrupts. 4161 */ 4162 s = splcam(); 4163 4164 bus = path->bus; 4165 4166 if (async_code == AC_BUS_RESET) { 4167 int s; 4168 4169 s = splclock(); 4170 /* Update our notion of when the last reset occurred */ 4171 microtime(&bus->last_reset); 4172 splx(s); 4173 } 4174 4175 for (target = TAILQ_FIRST(&bus->et_entries); 4176 target != NULL; 4177 target = next_target) { 4178 4179 next_target = TAILQ_NEXT(target, links); 4180 4181 if (path->target != target 4182 && path->target->target_id != CAM_TARGET_WILDCARD) 4183 continue; 4184 4185 if (async_code == AC_SENT_BDR) { 4186 int s; 4187 4188 /* Update our notion of when the last reset occurred */ 4189 s = splclock(); 4190 microtime(&path->target->last_reset); 4191 splx(s); 4192 } 4193 4194 for (device = TAILQ_FIRST(&target->ed_entries); 4195 device != NULL; 4196 device = next_device) { 4197 cam_status status; 4198 struct cam_path newpath; 4199 4200 next_device = TAILQ_NEXT(device, links); 4201 4202 if (path->device != device 4203 && path->device->lun_id != CAM_LUN_WILDCARD) 4204 continue; 4205 4206 /* 4207 * We need our own path with wildcards expanded to 4208 * handle certain types of events. 4209 */ 4210 if ((async_code == AC_SENT_BDR) 4211 || (async_code == AC_BUS_RESET) 4212 || (async_code == AC_INQ_CHANGED)) 4213 status = xpt_compile_path(&newpath, NULL, 4214 bus->path_id, 4215 target->target_id, 4216 device->lun_id); 4217 else 4218 status = CAM_REQ_CMP_ERR; 4219 4220 if (status == CAM_REQ_CMP) { 4221 4222 /* 4223 * Allow transfer negotiation to occur in a 4224 * tag free environment. 
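 * (xpt_toggle_tags() below briefly turns tagged queuing off and back on
 * so the controller can renegotiate without tagged messages getting in
 * the way; see the comment above that routine further down.)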
4225 */
4226 if (async_code == AC_SENT_BDR
4227 || async_code == AC_BUS_RESET)
4228 xpt_toggle_tags(&newpath);
4229
4230 if (async_code == AC_INQ_CHANGED) {
4231 /*
4232 * We've sent a start unit command, or
4233 * something similar to a device that
4234 * may have caused its inquiry data to
4235 * change. So we re-scan the device to
4236 * refresh the inquiry data for it.
4237 */
4238 xpt_scan_lun(newpath.periph, &newpath,
4239 CAM_EXPECT_INQ_CHANGE,
4240 NULL);
4241 }
4242 xpt_release_path(&newpath);
4243 } else if (async_code == AC_LOST_DEVICE) {
4244 device->flags |= CAM_DEV_UNCONFIGURED;
4245 } else if (async_code == AC_TRANSFER_NEG) {
4246 struct ccb_trans_settings *settings;
4247
4248 settings =
4249 (struct ccb_trans_settings *)async_arg;
4250 xpt_set_transfer_settings(settings, device,
4251 /*async_update*/TRUE);
4252 }
4253
4254 xpt_async_bcast(&device->asyncs,
4255 async_code,
4256 path,
4257 async_arg);
4258 }
4259 }
4260
4261 /*
4262 * If this wasn't a fully wildcarded async, tell all
4263 * clients that want all async events.
4264 */
4265 if (bus != xpt_periph->path->bus)
4266 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4267 path, async_arg);
4268 splx(s);
4269 }
4270
4271 static void
4272 xpt_async_bcast(struct async_list *async_head,
4273 u_int32_t async_code,
4274 struct cam_path *path, void *async_arg)
4275 {
4276 struct async_node *cur_entry;
4277
4278 cur_entry = SLIST_FIRST(async_head);
4279 while (cur_entry != NULL) {
4280 struct async_node *next_entry;
4281 /*
4282 * Grab the next list entry before we call the current
4283 * entry's callback. This is because the callback function
4284 * can delete its async callback entry.
4285 */
4286 next_entry = SLIST_NEXT(cur_entry, links);
4287 if ((cur_entry->event_enable & async_code) != 0)
4288 cur_entry->callback(cur_entry->callback_arg,
4289 async_code, path,
4290 async_arg);
4291 cur_entry = next_entry;
4292 }
4293 }
4294
4295 u_int32_t
4296 xpt_freeze_devq(struct cam_path *path, u_int count)
4297 {
4298 int s;
4299 struct ccb_hdr *ccbh;
4300
4301 s = splcam();
4302 path->device->qfrozen_cnt += count;
4303
4304 /*
4305 * Mark the last CCB in the queue as needing
4306 * to be requeued if the driver hasn't
4307 * changed its state yet. This fixes a race
4308 * where a ccb is just about to be queued to
4309 * a controller driver when its interrupt routine
4310 * freezes the queue. To completely close the
4311 * hole, controller drivers must check to see
4312 * if a ccb's status is still CAM_REQ_INPROG
4313 * under spl protection just before they queue
4314 * the CCB. See ahc_action/ahc_freeze_devq for
4315 * an example.
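 *
 * A minimal sketch of that driver-side check (illustrative only; "softc",
 * "ccb" and xxx_hand_to_hardware() are hypothetical names standing in for
 * whatever the SIM uses, not real routines):
 *
 *	s = splcam();
 *	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
 *		splx(s);
 *		xpt_done(ccb);	-- the XPT marked it CAM_REQUEUE_REQ
 *		return;
 *	}
 *	xxx_hand_to_hardware(softc, ccb);
 *	splx(s);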
4316 */ 4317 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq); 4318 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4319 ccbh->status = CAM_REQUEUE_REQ; 4320 splx(s); 4321 return (path->device->qfrozen_cnt); 4322 } 4323 4324 u_int32_t 4325 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4326 { 4327 sim->devq->send_queue.qfrozen_cnt += count; 4328 if (sim->devq->active_dev != NULL) { 4329 struct ccb_hdr *ccbh; 4330 4331 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs, 4332 ccb_hdr_tailq); 4333 if (ccbh && ccbh->status == CAM_REQ_INPROG) 4334 ccbh->status = CAM_REQUEUE_REQ; 4335 } 4336 return (sim->devq->send_queue.qfrozen_cnt); 4337 } 4338 4339 static void 4340 xpt_release_devq_timeout(void *arg) 4341 { 4342 struct cam_ed *device; 4343 4344 device = (struct cam_ed *)arg; 4345 4346 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE); 4347 } 4348 4349 void 4350 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4351 { 4352 xpt_release_devq_device(path->device, count, run_queue); 4353 } 4354 4355 static void 4356 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4357 { 4358 int rundevq; 4359 int s; 4360 4361 rundevq = 0; 4362 s = splcam(); 4363 if (dev->qfrozen_cnt > 0) { 4364 4365 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count; 4366 dev->qfrozen_cnt -= count; 4367 if (dev->qfrozen_cnt == 0) { 4368 4369 /* 4370 * No longer need to wait for a successful 4371 * command completion. 4372 */ 4373 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4374 4375 /* 4376 * Remove any timeouts that might be scheduled 4377 * to release this queue. 4378 */ 4379 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4380 untimeout(xpt_release_devq_timeout, dev, 4381 dev->c_handle); 4382 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4383 } 4384 4385 /* 4386 * Now that we are unfrozen schedule the 4387 * device so any pending transactions are 4388 * run. 4389 */ 4390 if ((dev->ccbq.queue.entries > 0) 4391 && (xpt_schedule_dev_sendq(dev->target->bus, dev)) 4392 && (run_queue != 0)) { 4393 rundevq = 1; 4394 } 4395 } 4396 } 4397 splx(s); 4398 if (rundevq != 0) 4399 xpt_run_dev_sendq(dev->target->bus); 4400 } 4401 4402 void 4403 xpt_release_simq(struct cam_sim *sim, int run_queue) 4404 { 4405 int s; 4406 struct camq *sendq; 4407 4408 sendq = &(sim->devq->send_queue); 4409 s = splcam(); 4410 if (sendq->qfrozen_cnt > 0) { 4411 4412 sendq->qfrozen_cnt--; 4413 if (sendq->qfrozen_cnt == 0) { 4414 struct cam_eb *bus; 4415 4416 /* 4417 * If there is a timeout scheduled to release this 4418 * sim queue, remove it. The queue frozen count is 4419 * already at 0. 4420 */ 4421 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4422 untimeout(xpt_release_simq_timeout, sim, 4423 sim->c_handle); 4424 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4425 } 4426 bus = xpt_find_bus(sim->path_id); 4427 splx(s); 4428 4429 if (run_queue) { 4430 /* 4431 * Now that we are unfrozen run the send queue. 
4432 */ 4433 xpt_run_dev_sendq(bus); 4434 } 4435 xpt_release_bus(bus); 4436 } else 4437 splx(s); 4438 } else 4439 splx(s); 4440 } 4441 4442 static void 4443 xpt_release_simq_timeout(void *arg) 4444 { 4445 struct cam_sim *sim; 4446 4447 sim = (struct cam_sim *)arg; 4448 xpt_release_simq(sim, /* run_queue */ TRUE); 4449 } 4450 4451 void 4452 xpt_done(union ccb *done_ccb) 4453 { 4454 int s; 4455 4456 s = splcam(); 4457 4458 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); 4459 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { 4460 /* 4461 * Queue up the request for handling by our SWI handler 4462 * any of the "non-immediate" type of ccbs. 4463 */ 4464 switch (done_ccb->ccb_h.path->periph->type) { 4465 case CAM_PERIPH_BIO: 4466 TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h, 4467 sim_links.tqe); 4468 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4469 setsoftcambio(); 4470 break; 4471 case CAM_PERIPH_NET: 4472 TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h, 4473 sim_links.tqe); 4474 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4475 setsoftcamnet(); 4476 break; 4477 } 4478 } 4479 splx(s); 4480 } 4481 4482 union ccb * 4483 xpt_alloc_ccb() 4484 { 4485 union ccb *new_ccb; 4486 4487 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK); 4488 return (new_ccb); 4489 } 4490 4491 void 4492 xpt_free_ccb(union ccb *free_ccb) 4493 { 4494 free(free_ccb, M_DEVBUF); 4495 } 4496 4497 4498 4499 /* Private XPT functions */ 4500 4501 /* 4502 * Get a CAM control block for the caller. Charge the structure to the device 4503 * referenced by the path. If the this device has no 'credits' then the 4504 * device already has the maximum number of outstanding operations under way 4505 * and we return NULL. If we don't have sufficient resources to allocate more 4506 * ccbs, we also return NULL. 4507 */ 4508 static union ccb * 4509 xpt_get_ccb(struct cam_ed *device) 4510 { 4511 union ccb *new_ccb; 4512 int s; 4513 4514 s = splsoftcam(); 4515 if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) { 4516 new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT); 4517 if (new_ccb == NULL) { 4518 splx(s); 4519 return (NULL); 4520 } 4521 callout_handle_init(&new_ccb->ccb_h.timeout_ch); 4522 SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h, 4523 xpt_links.sle); 4524 xpt_ccb_count++; 4525 } 4526 cam_ccbq_take_opening(&device->ccbq); 4527 SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle); 4528 splx(s); 4529 return (new_ccb); 4530 } 4531 4532 static void 4533 xpt_release_bus(struct cam_eb *bus) 4534 { 4535 int s; 4536 4537 s = splcam(); 4538 if ((--bus->refcount == 0) 4539 && (TAILQ_FIRST(&bus->et_entries) == NULL)) { 4540 TAILQ_REMOVE(&xpt_busses, bus, links); 4541 bus_generation++; 4542 splx(s); 4543 free(bus, M_DEVBUF); 4544 } else 4545 splx(s); 4546 } 4547 4548 static struct cam_et * 4549 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4550 { 4551 struct cam_et *target; 4552 4553 target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT); 4554 if (target != NULL) { 4555 struct cam_et *cur_target; 4556 4557 target->bus = bus; 4558 target->target_id = target_id; 4559 target->refcount = 1; 4560 /* 4561 * Hold a reference to our parent bus so it 4562 * will not go away before we do. 
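 * The reference is dropped again in xpt_release_target(), which in turn
 * calls xpt_release_bus() once the last target reference and device
 * entry are gone.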
4563 */ 4564 bus->refcount++; 4565 TAILQ_INIT(&target->ed_entries); 4566 timevalclear(&target->last_reset); 4567 4568 /* Insertion sort into our bus's target list */ 4569 cur_target = TAILQ_FIRST(&bus->et_entries); 4570 while (cur_target != NULL && cur_target->target_id < target_id) 4571 cur_target = TAILQ_NEXT(cur_target, links); 4572 4573 if (cur_target != NULL) { 4574 TAILQ_INSERT_BEFORE(cur_target, target, links); 4575 } else { 4576 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4577 } 4578 bus->generation++; 4579 } 4580 return (target); 4581 } 4582 4583 static void 4584 xpt_release_target(struct cam_eb *bus, struct cam_et *target) 4585 { 4586 int s; 4587 4588 s = splcam(); 4589 if ((--target->refcount == 0) 4590 && (TAILQ_FIRST(&target->ed_entries) == NULL)) { 4591 TAILQ_REMOVE(&bus->et_entries, target, links); 4592 bus->generation++; 4593 splx(s); 4594 free(target, M_DEVBUF); 4595 xpt_release_bus(bus); 4596 } else 4597 splx(s); 4598 } 4599 4600 static struct cam_ed * 4601 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4602 { 4603 struct cam_ed *device; 4604 struct cam_devq *devq; 4605 cam_status status; 4606 4607 /* Make space for us in the device queue on our bus */ 4608 devq = bus->sim->devq; 4609 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); 4610 4611 if (status != CAM_REQ_CMP) { 4612 device = NULL; 4613 } else { 4614 device = (struct cam_ed *)malloc(sizeof(*device), 4615 M_DEVBUF, M_NOWAIT); 4616 } 4617 4618 if (device != NULL) { 4619 struct cam_ed *cur_device; 4620 4621 bzero(device, sizeof(*device)); 4622 4623 SLIST_INIT(&device->asyncs); 4624 SLIST_INIT(&device->periphs); 4625 callout_handle_init(&device->c_handle); 4626 device->refcount = 1; 4627 device->flags |= CAM_DEV_UNCONFIGURED; 4628 /* 4629 * Take the default quirk entry until we have inquiry 4630 * data and can determine a better quirk to use. 4631 */ 4632 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1]; 4633 4634 cam_init_pinfo(&device->alloc_ccb_entry.pinfo); 4635 device->alloc_ccb_entry.device = device; 4636 cam_init_pinfo(&device->send_ccb_entry.pinfo); 4637 device->send_ccb_entry.device = device; 4638 4639 device->target = target; 4640 /* 4641 * Hold a reference to our parent target so it 4642 * will not go away before we do. 4643 */ 4644 target->refcount++; 4645 4646 device->lun_id = lun_id; 4647 4648 /* Initialize our queues */ 4649 if (camq_init(&device->drvq, 0) != 0) { 4650 free(device, M_DEVBUF); 4651 return (NULL); 4652 } 4653 4654 if (cam_ccbq_init(&device->ccbq, 4655 bus->sim->max_dev_openings) != 0) { 4656 camq_fini(&device->drvq); 4657 free(device, M_DEVBUF); 4658 return (NULL); 4659 } 4660 /* 4661 * XXX should be limited by number of CCBs this bus can 4662 * do. 
4663 */ 4664 xpt_max_ccbs += device->ccbq.devq_openings; 4665 /* Insertion sort into our target's device list */ 4666 cur_device = TAILQ_FIRST(&target->ed_entries); 4667 while (cur_device != NULL && cur_device->lun_id < lun_id) 4668 cur_device = TAILQ_NEXT(cur_device, links); 4669 if (cur_device != NULL) { 4670 TAILQ_INSERT_BEFORE(cur_device, device, links); 4671 } else { 4672 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4673 } 4674 target->generation++; 4675 } 4676 return (device); 4677 } 4678 4679 static void 4680 xpt_release_device(struct cam_eb *bus, struct cam_et *target, 4681 struct cam_ed *device) 4682 { 4683 int s; 4684 4685 s = splcam(); 4686 if ((--device->refcount == 0) 4687 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) { 4688 struct cam_devq *devq; 4689 4690 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX 4691 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) 4692 panic("Removing device while still queued for ccbs"); 4693 4694 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4695 untimeout(xpt_release_devq_timeout, device, 4696 device->c_handle); 4697 4698 TAILQ_REMOVE(&target->ed_entries, device,links); 4699 target->generation++; 4700 xpt_max_ccbs -= device->ccbq.devq_openings; 4701 /* Release our slot in the devq */ 4702 devq = bus->sim->devq; 4703 cam_devq_resize(devq, devq->alloc_queue.array_size - 1); 4704 splx(s); 4705 free(device, M_DEVBUF); 4706 } else 4707 splx(s); 4708 } 4709 4710 static u_int32_t 4711 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4712 { 4713 int s; 4714 int diff; 4715 int result; 4716 struct cam_ed *dev; 4717 4718 dev = path->device; 4719 s = splsoftcam(); 4720 4721 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 4722 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4723 if (result == CAM_REQ_CMP && (diff < 0)) { 4724 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 4725 } 4726 /* Adjust the global limit */ 4727 xpt_max_ccbs += diff; 4728 splx(s); 4729 return (result); 4730 } 4731 4732 static struct cam_eb * 4733 xpt_find_bus(path_id_t path_id) 4734 { 4735 struct cam_eb *bus; 4736 4737 for (bus = TAILQ_FIRST(&xpt_busses); 4738 bus != NULL; 4739 bus = TAILQ_NEXT(bus, links)) { 4740 if (bus->path_id == path_id) { 4741 bus->refcount++; 4742 break; 4743 } 4744 } 4745 return (bus); 4746 } 4747 4748 static struct cam_et * 4749 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4750 { 4751 struct cam_et *target; 4752 4753 for (target = TAILQ_FIRST(&bus->et_entries); 4754 target != NULL; 4755 target = TAILQ_NEXT(target, links)) { 4756 if (target->target_id == target_id) { 4757 target->refcount++; 4758 break; 4759 } 4760 } 4761 return (target); 4762 } 4763 4764 static struct cam_ed * 4765 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4766 { 4767 struct cam_ed *device; 4768 4769 for (device = TAILQ_FIRST(&target->ed_entries); 4770 device != NULL; 4771 device = TAILQ_NEXT(device, links)) { 4772 if (device->lun_id == lun_id) { 4773 device->refcount++; 4774 break; 4775 } 4776 } 4777 return (device); 4778 } 4779 4780 typedef struct { 4781 union ccb *request_ccb; 4782 struct ccb_pathinq *cpi; 4783 int pending_count; 4784 } xpt_scan_bus_info; 4785 4786 /* 4787 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb. 4788 * As the scan progresses, xpt_scan_bus is used as the 4789 * callback on completion function. 
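 *
 * Roughly, a scan is kicked off with something like the following
 * (an illustrative sketch only; error handling is omitted, "my_done" is a
 * hypothetical completion callback, the priority and flag values are just
 * examples, and "path" is assumed to be a wildcard path for the bus):
 *
 *	ccb = xpt_alloc_ccb();
 *	xpt_setup_ccb(&ccb->ccb_h, path, /-priority-/5);
 *	ccb->ccb_h.func_code = XPT_SCAN_BUS;
 *	ccb->ccb_h.cbfcnp = my_done;
 *	ccb->crcn.flags = CAM_FLAG_NONE;
 *	xpt_action(ccb);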
4790 */ 4791 static void 4792 xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb) 4793 { 4794 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4795 ("xpt_scan_bus\n")); 4796 switch (request_ccb->ccb_h.func_code) { 4797 case XPT_SCAN_BUS: 4798 { 4799 xpt_scan_bus_info *scan_info; 4800 union ccb *work_ccb; 4801 struct cam_path *path; 4802 u_int i; 4803 u_int max_target; 4804 u_int initiator_id; 4805 4806 /* Find out the characteristics of the bus */ 4807 work_ccb = xpt_alloc_ccb(); 4808 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path, 4809 request_ccb->ccb_h.pinfo.priority); 4810 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 4811 xpt_action(work_ccb); 4812 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 4813 request_ccb->ccb_h.status = work_ccb->ccb_h.status; 4814 xpt_free_ccb(work_ccb); 4815 xpt_done(request_ccb); 4816 return; 4817 } 4818 4819 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) { 4820 /* 4821 * Can't scan the bus on an adapter that 4822 * cannot perform the initiator role. 4823 */ 4824 request_ccb->ccb_h.status = CAM_REQ_CMP; 4825 xpt_free_ccb(work_ccb); 4826 xpt_done(request_ccb); 4827 return; 4828 } 4829 4830 /* Save some state for use while we probe for devices */ 4831 scan_info = (xpt_scan_bus_info *) 4832 malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK); 4833 scan_info->request_ccb = request_ccb; 4834 scan_info->cpi = &work_ccb->cpi; 4835 4836 /* Cache on our stack so we can work asynchronously */ 4837 max_target = scan_info->cpi->max_target; 4838 initiator_id = scan_info->cpi->initiator_id; 4839 4840 /* 4841 * Don't count the initiator if the 4842 * initiator is addressable. 4843 */ 4844 scan_info->pending_count = max_target + 1; 4845 if (initiator_id <= max_target) 4846 scan_info->pending_count--; 4847 4848 for (i = 0; i <= max_target; i++) { 4849 cam_status status; 4850 if (i == initiator_id) 4851 continue; 4852 4853 status = xpt_create_path(&path, xpt_periph, 4854 request_ccb->ccb_h.path_id, 4855 i, 0); 4856 if (status != CAM_REQ_CMP) { 4857 printf("xpt_scan_bus: xpt_create_path failed" 4858 " with status %#x, bus scan halted\n", 4859 status); 4860 break; 4861 } 4862 work_ccb = xpt_alloc_ccb(); 4863 xpt_setup_ccb(&work_ccb->ccb_h, path, 4864 request_ccb->ccb_h.pinfo.priority); 4865 work_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4866 work_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4867 work_ccb->ccb_h.ppriv_ptr0 = scan_info; 4868 work_ccb->crcn.flags = request_ccb->crcn.flags; 4869 #if 0 4870 printf("xpt_scan_bus: probing %d:%d:%d\n", 4871 request_ccb->ccb_h.path_id, i, 0); 4872 #endif 4873 xpt_action(work_ccb); 4874 } 4875 break; 4876 } 4877 case XPT_SCAN_LUN: 4878 { 4879 xpt_scan_bus_info *scan_info; 4880 path_id_t path_id; 4881 target_id_t target_id; 4882 lun_id_t lun_id; 4883 4884 /* Reuse the same CCB to query if a device was really found */ 4885 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0; 4886 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path, 4887 request_ccb->ccb_h.pinfo.priority); 4888 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 4889 4890 path_id = request_ccb->ccb_h.path_id; 4891 target_id = request_ccb->ccb_h.target_id; 4892 lun_id = request_ccb->ccb_h.target_lun; 4893 xpt_action(request_ccb); 4894 4895 #if 0 4896 printf("xpt_scan_bus: got back probe from %d:%d:%d\n", 4897 path_id, target_id, lun_id); 4898 #endif 4899 4900 if (request_ccb->ccb_h.status != CAM_REQ_CMP) { 4901 struct cam_ed *device; 4902 struct cam_et *target; 4903 int s; 4904 4905 /* 4906 * If we already probed lun 0 successfully, or 4907 * we have 
additional configured luns on this 4908 * target that might have "gone away", go onto 4909 * the next lun. 4910 */ 4911 target = request_ccb->ccb_h.path->target; 4912 s = splcam(); 4913 device = TAILQ_FIRST(&target->ed_entries); 4914 if (device != NULL) 4915 device = TAILQ_NEXT(device, links); 4916 splx(s); 4917 4918 if ((lun_id != 0) || (device != NULL)) { 4919 /* Try the next lun */ 4920 if (lun_id < (CAM_SCSI2_MAXLUN-1) || 4921 (device->quirk->quirks & CAM_QUIRK_HILUNS)) 4922 lun_id++; 4923 } 4924 } else { 4925 struct cam_ed *device; 4926 4927 device = request_ccb->ccb_h.path->device; 4928 4929 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) { 4930 /* Try the next lun */ 4931 if (lun_id < (CAM_SCSI2_MAXLUN-1) || 4932 (device->quirk->quirks & CAM_QUIRK_HILUNS)) 4933 lun_id++; 4934 } 4935 } 4936 4937 xpt_free_path(request_ccb->ccb_h.path); 4938 4939 /* Check Bounds */ 4940 if ((lun_id == request_ccb->ccb_h.target_lun) 4941 || lun_id > scan_info->cpi->max_lun) { 4942 /* We're done */ 4943 4944 xpt_free_ccb(request_ccb); 4945 scan_info->pending_count--; 4946 if (scan_info->pending_count == 0) { 4947 xpt_free_ccb((union ccb *)scan_info->cpi); 4948 request_ccb = scan_info->request_ccb; 4949 free(scan_info, M_TEMP); 4950 request_ccb->ccb_h.status = CAM_REQ_CMP; 4951 xpt_done(request_ccb); 4952 } 4953 } else { 4954 /* Try the next device */ 4955 struct cam_path *path; 4956 cam_status status; 4957 4958 path = request_ccb->ccb_h.path; 4959 status = xpt_create_path(&path, xpt_periph, 4960 path_id, target_id, lun_id); 4961 if (status != CAM_REQ_CMP) { 4962 printf("xpt_scan_bus: xpt_create_path failed " 4963 "with status %#x, halting LUN scan\n", 4964 status); 4965 xpt_free_ccb(request_ccb); 4966 scan_info->pending_count--; 4967 if (scan_info->pending_count == 0) { 4968 xpt_free_ccb( 4969 (union ccb *)scan_info->cpi); 4970 request_ccb = scan_info->request_ccb; 4971 free(scan_info, M_TEMP); 4972 request_ccb->ccb_h.status = CAM_REQ_CMP; 4973 xpt_done(request_ccb); 4974 break; 4975 } 4976 } 4977 xpt_setup_ccb(&request_ccb->ccb_h, path, 4978 request_ccb->ccb_h.pinfo.priority); 4979 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 4980 request_ccb->ccb_h.cbfcnp = xpt_scan_bus; 4981 request_ccb->ccb_h.ppriv_ptr0 = scan_info; 4982 request_ccb->crcn.flags = 4983 scan_info->request_ccb->crcn.flags; 4984 #if 0 4985 xpt_print_path(path); 4986 printf("xpt_scan bus probing\n"); 4987 #endif 4988 xpt_action(request_ccb); 4989 } 4990 break; 4991 } 4992 default: 4993 break; 4994 } 4995 } 4996 4997 typedef enum { 4998 PROBE_TUR, 4999 PROBE_INQUIRY, 5000 PROBE_MODE_SENSE, 5001 PROBE_SERIAL_NUM, 5002 PROBE_TUR_FOR_NEGOTIATION 5003 } probe_action; 5004 5005 typedef enum { 5006 PROBE_INQUIRY_CKSUM = 0x01, 5007 PROBE_SERIAL_CKSUM = 0x02, 5008 PROBE_NO_ANNOUNCE = 0x04 5009 } probe_flags; 5010 5011 typedef struct { 5012 TAILQ_HEAD(, ccb_hdr) request_ccbs; 5013 probe_action action; 5014 union ccb saved_ccb; 5015 probe_flags flags; 5016 MD5_CTX context; 5017 u_int8_t digest[16]; 5018 } probe_softc; 5019 5020 static void 5021 xpt_scan_lun(struct cam_periph *periph, struct cam_path *path, 5022 cam_flags flags, union ccb *request_ccb) 5023 { 5024 struct ccb_pathinq cpi; 5025 cam_status status; 5026 struct cam_path *new_path; 5027 struct cam_periph *old_periph; 5028 int s; 5029 5030 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE, 5031 ("xpt_scan_lun\n")); 5032 5033 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1); 5034 cpi.ccb_h.func_code = XPT_PATH_INQ; 5035 xpt_action((union ccb *)&cpi); 5036 5037 if (cpi.ccb_h.status != 
CAM_REQ_CMP) { 5038 if (request_ccb != NULL) { 5039 request_ccb->ccb_h.status = cpi.ccb_h.status; 5040 xpt_done(request_ccb); 5041 } 5042 return; 5043 } 5044 5045 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) { 5046 /* 5047 * Can't scan the bus on an adapter that 5048 * cannot perform the initiator role. 5049 */ 5050 if (request_ccb != NULL) { 5051 request_ccb->ccb_h.status = CAM_REQ_CMP; 5052 xpt_done(request_ccb); 5053 } 5054 return; 5055 } 5056 5057 if (request_ccb == NULL) { 5058 request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT); 5059 if (request_ccb == NULL) { 5060 xpt_print_path(path); 5061 printf("xpt_scan_lun: can't allocate CCB, can't " 5062 "continue\n"); 5063 return; 5064 } 5065 new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT); 5066 if (new_path == NULL) { 5067 xpt_print_path(path); 5068 printf("xpt_scan_lun: can't allocate path, can't " 5069 "continue\n"); 5070 free(request_ccb, M_TEMP); 5071 return; 5072 } 5073 status = xpt_compile_path(new_path, xpt_periph, 5074 path->bus->path_id, 5075 path->target->target_id, 5076 path->device->lun_id); 5077 5078 if (status != CAM_REQ_CMP) { 5079 xpt_print_path(path); 5080 printf("xpt_scan_lun: can't compile path, can't " 5081 "continue\n"); 5082 free(request_ccb, M_TEMP); 5083 free(new_path, M_TEMP); 5084 return; 5085 } 5086 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1); 5087 request_ccb->ccb_h.cbfcnp = xptscandone; 5088 request_ccb->ccb_h.func_code = XPT_SCAN_LUN; 5089 request_ccb->crcn.flags = flags; 5090 } 5091 5092 s = splsoftcam(); 5093 if ((old_periph = cam_periph_find(path, "probe")) != NULL) { 5094 probe_softc *softc; 5095 5096 softc = (probe_softc *)old_periph->softc; 5097 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5098 periph_links.tqe); 5099 } else { 5100 status = cam_periph_alloc(proberegister, NULL, probecleanup, 5101 probestart, "probe", 5102 CAM_PERIPH_BIO, 5103 request_ccb->ccb_h.path, NULL, 0, 5104 request_ccb); 5105 5106 if (status != CAM_REQ_CMP) { 5107 xpt_print_path(path); 5108 printf("xpt_scan_lun: cam_alloc_periph returned an " 5109 "error, can't continue probe\n"); 5110 request_ccb->ccb_h.status = status; 5111 xpt_done(request_ccb); 5112 } 5113 } 5114 splx(s); 5115 } 5116 5117 static void 5118 xptscandone(struct cam_periph *periph, union ccb *done_ccb) 5119 { 5120 xpt_release_path(done_ccb->ccb_h.path); 5121 free(done_ccb->ccb_h.path, M_TEMP); 5122 free(done_ccb, M_TEMP); 5123 } 5124 5125 static cam_status 5126 proberegister(struct cam_periph *periph, void *arg) 5127 { 5128 union ccb *request_ccb; /* CCB representing the probe request */ 5129 probe_softc *softc; 5130 5131 request_ccb = (union ccb *)arg; 5132 if (periph == NULL) { 5133 printf("proberegister: periph was NULL!!\n"); 5134 return(CAM_REQ_CMP_ERR); 5135 } 5136 5137 if (request_ccb == NULL) { 5138 printf("proberegister: no probe CCB, can't register device\n"); 5139 return(CAM_REQ_CMP_ERR); 5140 } 5141 5142 softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT); 5143 5144 if (softc == NULL) { 5145 printf("proberegister: Unable to probe new device. " 5146 "Unable to allocate softc\n"); 5147 return(CAM_REQ_CMP_ERR); 5148 } 5149 TAILQ_INIT(&softc->request_ccbs); 5150 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h, 5151 periph_links.tqe); 5152 softc->flags = 0; 5153 periph->softc = softc; 5154 cam_periph_acquire(periph); 5155 /* 5156 * Ensure we've waited at least a bus settle 5157 * delay before attempting to probe the device. 
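 * The settle delay is measured from the bus's last_reset timestamp,
 * which xpt_async() refreshes each time an AC_BUS_RESET notification
 * arrives for the bus.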
5158 */ 5159 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset, 5160 SCSI_DELAY); 5161 probeschedule(periph); 5162 return(CAM_REQ_CMP); 5163 } 5164 5165 static void 5166 probeschedule(struct cam_periph *periph) 5167 { 5168 struct ccb_pathinq cpi; 5169 union ccb *ccb; 5170 probe_softc *softc; 5171 5172 softc = (probe_softc *)periph->softc; 5173 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5174 5175 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1); 5176 cpi.ccb_h.func_code = XPT_PATH_INQ; 5177 xpt_action((union ccb *)&cpi); 5178 5179 /* 5180 * If a device has gone away and another device, or the same one, 5181 * is back in the same place, it should have a unit attention 5182 * condition pending. It will not report the unit attention in 5183 * response to an inquiry, which may leave invalid transfer 5184 * negotiations in effect. The TUR will reveal the unit attention 5185 * condition. Only send the TUR for lun 0, since some devices 5186 * will get confused by commands other than inquiry to non-existent 5187 * luns. If you think a device has gone away start your scan from 5188 * lun 0. This will insure that any bogus transfer settings are 5189 * invalidated. 5190 * 5191 * If we haven't seen the device before and the controller supports 5192 * some kind of transfer negotiation, negotiate with the first 5193 * sent command if no bus reset was performed at startup. This 5194 * ensures that the device is not confused by transfer negotiation 5195 * settings left over by loader or BIOS action. 5196 */ 5197 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5198 && (ccb->ccb_h.target_lun == 0)) { 5199 softc->action = PROBE_TUR; 5200 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0 5201 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) { 5202 proberequestdefaultnegotiation(periph); 5203 softc->action = PROBE_INQUIRY; 5204 } else { 5205 softc->action = PROBE_INQUIRY; 5206 } 5207 5208 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE) 5209 softc->flags |= PROBE_NO_ANNOUNCE; 5210 else 5211 softc->flags &= ~PROBE_NO_ANNOUNCE; 5212 5213 xpt_schedule(periph, ccb->ccb_h.pinfo.priority); 5214 } 5215 5216 static void 5217 probestart(struct cam_periph *periph, union ccb *start_ccb) 5218 { 5219 /* Probe the device that our peripheral driver points to */ 5220 struct ccb_scsiio *csio; 5221 probe_softc *softc; 5222 5223 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n")); 5224 5225 softc = (probe_softc *)periph->softc; 5226 csio = &start_ccb->csio; 5227 5228 switch (softc->action) { 5229 case PROBE_TUR: 5230 case PROBE_TUR_FOR_NEGOTIATION: 5231 { 5232 scsi_test_unit_ready(csio, 5233 /*retries*/4, 5234 probedone, 5235 MSG_SIMPLE_Q_TAG, 5236 SSD_FULL_SIZE, 5237 /*timeout*/60000); 5238 break; 5239 } 5240 case PROBE_INQUIRY: 5241 { 5242 struct scsi_inquiry_data *inq_buf; 5243 5244 inq_buf = &periph->path->device->inq_data; 5245 /* 5246 * If the device is currently configured, we calculate an 5247 * MD5 checksum of the inquiry data, and if the serial number 5248 * length is greater than 0, add the serial number data 5249 * into the checksum as well. Once the inquiry and the 5250 * serial number check finish, we attempt to figure out 5251 * whether we still have the same device. 
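 * The matching recomputation and comparison is done in probedone()
 * once the PROBE_SERIAL_NUM step completes.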
5252 */ 5253 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) { 5254 5255 MD5Init(&softc->context); 5256 MD5Update(&softc->context, (unsigned char *)inq_buf, 5257 sizeof(struct scsi_inquiry_data)); 5258 softc->flags |= PROBE_INQUIRY_CKSUM; 5259 if (periph->path->device->serial_num_len > 0) { 5260 MD5Update(&softc->context, 5261 periph->path->device->serial_num, 5262 periph->path->device->serial_num_len); 5263 softc->flags |= PROBE_SERIAL_CKSUM; 5264 } 5265 MD5Final(softc->digest, &softc->context); 5266 } 5267 5268 scsi_inquiry(csio, 5269 /*retries*/4, 5270 probedone, 5271 MSG_SIMPLE_Q_TAG, 5272 (u_int8_t *)inq_buf, 5273 sizeof(*inq_buf), 5274 /*evpd*/FALSE, 5275 /*page_code*/0, 5276 SSD_MIN_SIZE, 5277 /*timeout*/60 * 1000); 5278 break; 5279 } 5280 case PROBE_MODE_SENSE: 5281 { 5282 void *mode_buf; 5283 int mode_buf_len; 5284 5285 mode_buf_len = sizeof(struct scsi_mode_header_6) 5286 + sizeof(struct scsi_mode_blk_desc) 5287 + sizeof(struct scsi_control_page); 5288 mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT); 5289 if (mode_buf != NULL) { 5290 scsi_mode_sense(csio, 5291 /*retries*/4, 5292 probedone, 5293 MSG_SIMPLE_Q_TAG, 5294 /*dbd*/FALSE, 5295 SMS_PAGE_CTRL_CURRENT, 5296 SMS_CONTROL_MODE_PAGE, 5297 mode_buf, 5298 mode_buf_len, 5299 SSD_FULL_SIZE, 5300 /*timeout*/60000); 5301 break; 5302 } 5303 xpt_print_path(periph->path); 5304 printf("Unable to mode sense control page - malloc failure\n"); 5305 softc->action = PROBE_SERIAL_NUM; 5306 /* FALLTHROUGH */ 5307 } 5308 case PROBE_SERIAL_NUM: 5309 { 5310 struct scsi_vpd_unit_serial_number *serial_buf; 5311 struct cam_ed* device; 5312 5313 serial_buf = NULL; 5314 device = periph->path->device; 5315 device->serial_num = NULL; 5316 device->serial_num_len = 0; 5317 5318 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) 5319 serial_buf = (struct scsi_vpd_unit_serial_number *) 5320 malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT); 5321 5322 if (serial_buf != NULL) { 5323 bzero(serial_buf, sizeof(*serial_buf)); 5324 scsi_inquiry(csio, 5325 /*retries*/4, 5326 probedone, 5327 MSG_SIMPLE_Q_TAG, 5328 (u_int8_t *)serial_buf, 5329 sizeof(*serial_buf), 5330 /*evpd*/TRUE, 5331 SVPD_UNIT_SERIAL_NUMBER, 5332 SSD_MIN_SIZE, 5333 /*timeout*/60 * 1000); 5334 break; 5335 } 5336 /* 5337 * We'll have to do without, let our probedone 5338 * routine finish up for us. 
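 * (probedone() recognizes this case by the NULL data_ptr and simply
 * skips the serial number processing.)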
5339 */ 5340 start_ccb->csio.data_ptr = NULL; 5341 probedone(periph, start_ccb); 5342 return; 5343 } 5344 } 5345 xpt_action(start_ccb); 5346 } 5347 5348 static void 5349 proberequestdefaultnegotiation(struct cam_periph *periph) 5350 { 5351 struct ccb_trans_settings cts; 5352 5353 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1); 5354 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5355 cts.flags = CCB_TRANS_USER_SETTINGS; 5356 xpt_action((union ccb *)&cts); 5357 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS; 5358 cts.flags &= ~CCB_TRANS_USER_SETTINGS; 5359 cts.flags |= CCB_TRANS_CURRENT_SETTINGS; 5360 xpt_action((union ccb *)&cts); 5361 } 5362 5363 static void 5364 probedone(struct cam_periph *periph, union ccb *done_ccb) 5365 { 5366 probe_softc *softc; 5367 struct cam_path *path; 5368 u_int32_t priority; 5369 5370 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n")); 5371 5372 softc = (probe_softc *)periph->softc; 5373 path = done_ccb->ccb_h.path; 5374 priority = done_ccb->ccb_h.pinfo.priority; 5375 5376 switch (softc->action) { 5377 case PROBE_TUR: 5378 { 5379 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 5380 5381 if (cam_periph_error(done_ccb, 0, 5382 SF_NO_PRINT, NULL) == ERESTART) 5383 return; 5384 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 5385 /* Don't wedge the queue */ 5386 xpt_release_devq(done_ccb->ccb_h.path, 5387 /*count*/1, 5388 /*run_queue*/TRUE); 5389 } 5390 softc->action = PROBE_INQUIRY; 5391 xpt_release_ccb(done_ccb); 5392 xpt_schedule(periph, priority); 5393 return; 5394 } 5395 case PROBE_INQUIRY: 5396 { 5397 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5398 struct scsi_inquiry_data *inq_buf; 5399 u_int8_t periph_qual; 5400 u_int8_t periph_dtype; 5401 5402 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID; 5403 inq_buf = &path->device->inq_data; 5404 5405 periph_qual = SID_QUAL(inq_buf); 5406 periph_dtype = SID_TYPE(inq_buf); 5407 if (periph_dtype != T_NODEVICE) { 5408 switch(periph_qual) { 5409 case SID_QUAL_LU_CONNECTED: 5410 { 5411 xpt_find_quirk(path->device); 5412 5413 if ((inq_buf->flags & SID_CmdQue) != 0) 5414 softc->action = 5415 PROBE_MODE_SENSE; 5416 else 5417 softc->action = 5418 PROBE_SERIAL_NUM; 5419 5420 path->device->flags &= 5421 ~CAM_DEV_UNCONFIGURED; 5422 5423 xpt_release_ccb(done_ccb); 5424 xpt_schedule(periph, priority); 5425 return; 5426 } 5427 default: 5428 break; 5429 } 5430 } 5431 } else if (cam_periph_error(done_ccb, 0, 5432 done_ccb->ccb_h.target_lun > 0 5433 ? SF_RETRY_UA|SF_QUIET_IR 5434 : SF_RETRY_UA, 5435 &softc->saved_ccb) == ERESTART) { 5436 return; 5437 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5438 /* Don't wedge the queue */ 5439 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5440 /*run_queue*/TRUE); 5441 } 5442 /* 5443 * If we get to this point, we got an error status back 5444 * from the inquiry and the error status doesn't require 5445 * automatically retrying the command. Therefore, the 5446 * inquiry failed. If we had inquiry information before 5447 * for this device, but this latest inquiry command failed, 5448 * the device has probably gone away. If this device isn't 5449 * already marked unconfigured, notify the peripheral 5450 * drivers that this device is no more. 5451 */ 5452 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) 5453 /* Send the async notification. 
*/ 5454 xpt_async(AC_LOST_DEVICE, path, NULL); 5455 5456 xpt_release_ccb(done_ccb); 5457 break; 5458 } 5459 case PROBE_MODE_SENSE: 5460 { 5461 struct ccb_scsiio *csio; 5462 struct scsi_mode_header_6 *mode_hdr; 5463 5464 csio = &done_ccb->csio; 5465 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr; 5466 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 5467 struct scsi_control_page *page; 5468 u_int8_t *offset; 5469 5470 offset = ((u_int8_t *)&mode_hdr[1]) 5471 + mode_hdr->blk_desc_len; 5472 page = (struct scsi_control_page *)offset; 5473 path->device->queue_flags = page->queue_flags; 5474 } else if (cam_periph_error(done_ccb, 0, 5475 SF_RETRY_UA|SF_NO_PRINT, 5476 &softc->saved_ccb) == ERESTART) { 5477 return; 5478 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5479 /* Don't wedge the queue */ 5480 xpt_release_devq(done_ccb->ccb_h.path, 5481 /*count*/1, /*run_queue*/TRUE); 5482 } 5483 xpt_release_ccb(done_ccb); 5484 free(mode_hdr, M_TEMP); 5485 softc->action = PROBE_SERIAL_NUM; 5486 xpt_schedule(periph, priority); 5487 return; 5488 } 5489 case PROBE_SERIAL_NUM: 5490 { 5491 struct ccb_scsiio *csio; 5492 struct scsi_vpd_unit_serial_number *serial_buf; 5493 u_int32_t priority; 5494 int changed; 5495 int have_serialnum; 5496 5497 changed = 1; 5498 have_serialnum = 0; 5499 csio = &done_ccb->csio; 5500 priority = done_ccb->ccb_h.pinfo.priority; 5501 serial_buf = 5502 (struct scsi_vpd_unit_serial_number *)csio->data_ptr; 5503 5504 /* Clean up from previous instance of this device */ 5505 if (path->device->serial_num != NULL) { 5506 free(path->device->serial_num, M_DEVBUF); 5507 path->device->serial_num = NULL; 5508 path->device->serial_num_len = 0; 5509 } 5510 5511 if (serial_buf == NULL) { 5512 /* 5513 * Don't process the command as it was never sent 5514 */ 5515 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP 5516 && (serial_buf->length > 0)) { 5517 5518 have_serialnum = 1; 5519 path->device->serial_num = 5520 (u_int8_t *)malloc((serial_buf->length + 1), 5521 M_DEVBUF, M_NOWAIT); 5522 if (path->device->serial_num != NULL) { 5523 bcopy(serial_buf->serial_num, 5524 path->device->serial_num, 5525 serial_buf->length); 5526 path->device->serial_num_len = 5527 serial_buf->length; 5528 path->device->serial_num[serial_buf->length] 5529 = '\0'; 5530 } 5531 } else if (cam_periph_error(done_ccb, 0, 5532 SF_RETRY_UA|SF_NO_PRINT, 5533 &softc->saved_ccb) == ERESTART) { 5534 return; 5535 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5536 /* Don't wedge the queue */ 5537 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5538 /*run_queue*/TRUE); 5539 } 5540 5541 /* 5542 * Let's see if we have seen this device before. 5543 */ 5544 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) { 5545 MD5_CTX context; 5546 u_int8_t digest[16]; 5547 5548 MD5Init(&context); 5549 5550 MD5Update(&context, 5551 (unsigned char *)&path->device->inq_data, 5552 sizeof(struct scsi_inquiry_data)); 5553 5554 if (have_serialnum) 5555 MD5Update(&context, serial_buf->serial_num, 5556 serial_buf->length); 5557 5558 MD5Final(digest, &context); 5559 if (bcmp(softc->digest, digest, 16) == 0) 5560 changed = 0; 5561 5562 /* 5563 * XXX Do we need to do a TUR in order to ensure 5564 * that the device really hasn't changed??? 
5565 */ 5566 if ((changed != 0) 5567 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0)) 5568 xpt_async(AC_LOST_DEVICE, path, NULL); 5569 } 5570 if (serial_buf != NULL) 5571 free(serial_buf, M_TEMP); 5572 5573 if (changed != 0) { 5574 /* 5575 * Now that we have all the necessary 5576 * information to safely perform transfer 5577 * negotiations... Controllers don't perform 5578 * any negotiation or tagged queuing until 5579 * after the first XPT_SET_TRAN_SETTINGS ccb is 5580 * received. So, on a new device, just retreive 5581 * the user settings, and set them as the current 5582 * settings to set the device up. 5583 */ 5584 proberequestdefaultnegotiation(periph); 5585 xpt_release_ccb(done_ccb); 5586 5587 /* 5588 * Perform a TUR to allow the controller to 5589 * perform any necessary transfer negotiation. 5590 */ 5591 softc->action = PROBE_TUR_FOR_NEGOTIATION; 5592 xpt_schedule(periph, priority); 5593 return; 5594 } 5595 xpt_release_ccb(done_ccb); 5596 break; 5597 } 5598 case PROBE_TUR_FOR_NEGOTIATION: 5599 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 5600 /* Don't wedge the queue */ 5601 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1, 5602 /*run_queue*/TRUE); 5603 } 5604 5605 path->device->flags &= ~CAM_DEV_UNCONFIGURED; 5606 5607 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) { 5608 /* Inform the XPT that a new device has been found */ 5609 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE; 5610 xpt_action(done_ccb); 5611 5612 xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb); 5613 } 5614 xpt_release_ccb(done_ccb); 5615 break; 5616 } 5617 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs); 5618 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe); 5619 done_ccb->ccb_h.status = CAM_REQ_CMP; 5620 xpt_done(done_ccb); 5621 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) { 5622 cam_periph_invalidate(periph); 5623 cam_periph_release(periph); 5624 } else { 5625 probeschedule(periph); 5626 } 5627 } 5628 5629 static void 5630 probecleanup(struct cam_periph *periph) 5631 { 5632 free(periph->softc, M_TEMP); 5633 } 5634 5635 static void 5636 xpt_find_quirk(struct cam_ed *device) 5637 { 5638 caddr_t match; 5639 5640 match = cam_quirkmatch((caddr_t)&device->inq_data, 5641 (caddr_t)xpt_quirk_table, 5642 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table), 5643 sizeof(*xpt_quirk_table), scsi_inquiry_match); 5644 5645 if (match == NULL) 5646 panic("xpt_find_quirk: device didn't match wildcard entry!!"); 5647 5648 device->quirk = (struct xpt_quirk_entry *)match; 5649 } 5650 5651 static void 5652 xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device, 5653 int async_update) 5654 { 5655 struct cam_sim *sim; 5656 int qfrozen; 5657 5658 sim = cts->ccb_h.path->bus->sim; 5659 if (async_update == FALSE) { 5660 struct scsi_inquiry_data *inq_data; 5661 struct ccb_pathinq cpi; 5662 struct ccb_trans_settings cur_cts; 5663 5664 if (device == NULL) { 5665 cts->ccb_h.status = CAM_PATH_INVALID; 5666 xpt_done((union ccb *)cts); 5667 return; 5668 } 5669 5670 /* 5671 * Perform sanity checking against what the 5672 * controller and device can do. 
5673 */ 5674 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1); 5675 cpi.ccb_h.func_code = XPT_PATH_INQ; 5676 xpt_action((union ccb *)&cpi); 5677 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1); 5678 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS; 5679 cur_cts.flags = CCB_TRANS_CURRENT_SETTINGS; 5680 xpt_action((union ccb *)&cur_cts); 5681 inq_data = &device->inq_data; 5682 5683 /* Fill in any gaps in what the user gave us */ 5684 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) 5685 cts->sync_period = cur_cts.sync_period; 5686 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) 5687 cts->sync_offset = cur_cts.sync_offset; 5688 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) == 0) 5689 cts->bus_width = cur_cts.bus_width; 5690 if ((cts->valid & CCB_TRANS_DISC_VALID) == 0) { 5691 cts->flags &= ~CCB_TRANS_DISC_ENB; 5692 cts->flags |= cur_cts.flags & CCB_TRANS_DISC_ENB; 5693 } 5694 if ((cts->valid & CCB_TRANS_TQ_VALID) == 0) { 5695 cts->flags &= ~CCB_TRANS_TAG_ENB; 5696 cts->flags |= cur_cts.flags & CCB_TRANS_TAG_ENB; 5697 } 5698 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0 5699 && (inq_data->flags & SID_Sync) == 0) 5700 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) { 5701 /* Force async */ 5702 cts->sync_period = 0; 5703 cts->sync_offset = 0; 5704 } 5705 5706 switch (cts->bus_width) { 5707 case MSG_EXT_WDTR_BUS_32_BIT: 5708 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5709 || (inq_data->flags & SID_WBus32) != 0) 5710 && (cpi.hba_inquiry & PI_WIDE_32) != 0) 5711 break; 5712 /* Fall Through to 16-bit */ 5713 case MSG_EXT_WDTR_BUS_16_BIT: 5714 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0 5715 || (inq_data->flags & SID_WBus16) != 0) 5716 && (cpi.hba_inquiry & PI_WIDE_16) != 0) { 5717 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 5718 break; 5719 } 5720 /* Fall Through to 8-bit */ 5721 default: /* New bus width?? */ 5722 case MSG_EXT_WDTR_BUS_8_BIT: 5723 /* All targets can do this */ 5724 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 5725 break; 5726 } 5727 5728 if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) { 5729 /* 5730 * Can't tag queue without disconnection. 5731 */ 5732 cts->flags &= ~CCB_TRANS_TAG_ENB; 5733 cts->valid |= CCB_TRANS_TQ_VALID; 5734 } 5735 5736 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0 5737 || (inq_data->flags & SID_CmdQue) == 0 5738 || (device->queue_flags & SCP_QUEUE_DQUE) != 0 5739 || (device->quirk->mintags == 0)) { 5740 /* 5741 * Can't tag on hardware that doesn't support, 5742 * doesn't have it enabled, or has broken tag support. 5743 */ 5744 cts->flags &= ~CCB_TRANS_TAG_ENB; 5745 } 5746 } 5747 5748 qfrozen = FALSE; 5749 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0 5750 && (async_update == FALSE)) { 5751 int device_tagenb; 5752 5753 /* 5754 * If we are transitioning from tags to no-tags or 5755 * vice-versa, we need to carefully freeze and restart 5756 * the queue so that we don't overlap tagged and non-tagged 5757 * commands. We also temporarily stop tags if there is 5758 * a change in transfer negotiation settings to allow 5759 * "tag-less" negotiation. 
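 * When tags are being turned off, the device queue is frozen here and
 * then released once it drains, via the XPT_REL_SIMQ request issued
 * with RELSIM_RELEASE_AFTER_QEMPTY at the bottom of this function.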
5760 */ 5761 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5762 || (device->inq_flags & SID_CmdQue) != 0) 5763 device_tagenb = TRUE; 5764 else 5765 device_tagenb = FALSE; 5766 5767 if (((cts->flags & CCB_TRANS_TAG_ENB) != 0 5768 && device_tagenb == FALSE) 5769 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0 5770 && device_tagenb == TRUE)) { 5771 5772 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) { 5773 /* 5774 * Delay change to use tags until after a 5775 * few commands have gone to this device so 5776 * the controller has time to perform transfer 5777 * negotiations without tagged messages getting 5778 * in the way. 5779 */ 5780 device->tag_delay_count = CAM_TAG_DELAY_COUNT; 5781 device->flags |= CAM_DEV_TAG_AFTER_COUNT; 5782 } else { 5783 xpt_freeze_devq(cts->ccb_h.path, /*count*/1); 5784 qfrozen = TRUE; 5785 device->inq_flags &= ~SID_CmdQue; 5786 xpt_dev_ccbq_resize(cts->ccb_h.path, 5787 sim->max_dev_openings); 5788 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5789 device->tag_delay_count = 0; 5790 } 5791 } 5792 } 5793 5794 if (async_update == FALSE) { 5795 /* 5796 * If we are currently performing tagged transactions to 5797 * this device and want to change its negotiation parameters, 5798 * go non-tagged for a bit to give the controller a chance to 5799 * negotiate unhampered by tag messages. 5800 */ 5801 if ((device->inq_flags & SID_CmdQue) != 0 5802 && (cts->flags & (CCB_TRANS_SYNC_RATE_VALID| 5803 CCB_TRANS_SYNC_OFFSET_VALID| 5804 CCB_TRANS_BUS_WIDTH_VALID)) != 0) 5805 xpt_toggle_tags(cts->ccb_h.path); 5806 5807 (*(sim->sim_action))(sim, (union ccb *)cts); 5808 } 5809 5810 if (qfrozen) { 5811 struct ccb_relsim crs; 5812 5813 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path, 5814 /*priority*/1); 5815 crs.ccb_h.func_code = XPT_REL_SIMQ; 5816 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5817 crs.openings 5818 = crs.release_timeout 5819 = crs.qfrozen_cnt 5820 = 0; 5821 xpt_action((union ccb *)&crs); 5822 } 5823 } 5824 5825 static void 5826 xpt_toggle_tags(struct cam_path *path) 5827 { 5828 struct cam_ed *dev; 5829 5830 /* 5831 * Give controllers a chance to renegotiate 5832 * before starting tag operations. We 5833 * "toggle" tagged queuing off then on 5834 * which causes the tag enable command delay 5835 * counter to come into effect. 
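 * (The "command delay counter" is tag_delay_count; once camisr() has
 * counted that many completions it calls xpt_start_tags(), which sets
 * SID_CmdQue again and resizes the device's ccb queue.)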
5836 */ 5837 dev = path->device; 5838 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5839 || ((dev->inq_flags & SID_CmdQue) != 0 5840 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) { 5841 struct ccb_trans_settings cts; 5842 5843 xpt_setup_ccb(&cts.ccb_h, path, 1); 5844 cts.flags = 0; 5845 cts.valid = CCB_TRANS_TQ_VALID; 5846 xpt_set_transfer_settings(&cts, path->device, 5847 /*async_update*/TRUE); 5848 cts.flags = CCB_TRANS_TAG_ENB; 5849 xpt_set_transfer_settings(&cts, path->device, 5850 /*async_update*/TRUE); 5851 } 5852 } 5853 5854 static void 5855 xpt_start_tags(struct cam_path *path) 5856 { 5857 struct ccb_relsim crs; 5858 struct cam_ed *device; 5859 struct cam_sim *sim; 5860 int newopenings; 5861 5862 device = path->device; 5863 sim = path->bus->sim; 5864 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 5865 xpt_freeze_devq(path, /*count*/1); 5866 device->inq_flags |= SID_CmdQue; 5867 newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings); 5868 xpt_dev_ccbq_resize(path, newopenings); 5869 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1); 5870 crs.ccb_h.func_code = XPT_REL_SIMQ; 5871 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 5872 crs.openings 5873 = crs.release_timeout 5874 = crs.qfrozen_cnt 5875 = 0; 5876 xpt_action((union ccb *)&crs); 5877 } 5878 5879 static int busses_to_config; 5880 static int busses_to_reset; 5881 5882 static int 5883 xptconfigbuscountfunc(struct cam_eb *bus, void *arg) 5884 { 5885 if (bus->path_id != CAM_XPT_PATH_ID) { 5886 struct cam_path path; 5887 struct ccb_pathinq cpi; 5888 int can_negotiate; 5889 5890 busses_to_config++; 5891 xpt_compile_path(&path, NULL, bus->path_id, 5892 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5893 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 5894 cpi.ccb_h.func_code = XPT_PATH_INQ; 5895 xpt_action((union ccb *)&cpi); 5896 can_negotiate = cpi.hba_inquiry; 5897 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 5898 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0 5899 && can_negotiate) 5900 busses_to_reset++; 5901 xpt_release_path(&path); 5902 } 5903 5904 return(1); 5905 } 5906 5907 static int 5908 xptconfigfunc(struct cam_eb *bus, void *arg) 5909 { 5910 struct cam_path *path; 5911 union ccb *work_ccb; 5912 5913 if (bus->path_id != CAM_XPT_PATH_ID) { 5914 cam_status status; 5915 int can_negotiate; 5916 5917 work_ccb = xpt_alloc_ccb(); 5918 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id, 5919 CAM_TARGET_WILDCARD, 5920 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){ 5921 printf("xptconfigfunc: xpt_create_path failed with " 5922 "status %#x for bus %d\n", status, bus->path_id); 5923 printf("xptconfigfunc: halting bus configuration\n"); 5924 xpt_free_ccb(work_ccb); 5925 busses_to_config--; 5926 xpt_finishconfig(xpt_periph, NULL); 5927 return(0); 5928 } 5929 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 5930 work_ccb->ccb_h.func_code = XPT_PATH_INQ; 5931 xpt_action(work_ccb); 5932 if (work_ccb->ccb_h.status != CAM_REQ_CMP) { 5933 printf("xptconfigfunc: CPI failed on bus %d " 5934 "with status %d\n", bus->path_id, 5935 work_ccb->ccb_h.status); 5936 xpt_finishconfig(xpt_periph, work_ccb); 5937 return(1); 5938 } 5939 5940 can_negotiate = work_ccb->cpi.hba_inquiry; 5941 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE); 5942 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0 5943 && (can_negotiate != 0)) { 5944 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1); 5945 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 5946 work_ccb->ccb_h.cbfcnp = NULL; 5947 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE, 5948 
("Resetting Bus\n")); 5949 xpt_action(work_ccb); 5950 xpt_finishconfig(xpt_periph, work_ccb); 5951 } else { 5952 /* Act as though we performed a successful BUS RESET */ 5953 work_ccb->ccb_h.func_code = XPT_RESET_BUS; 5954 xpt_finishconfig(xpt_periph, work_ccb); 5955 } 5956 } 5957 5958 return(1); 5959 } 5960 5961 static void 5962 xpt_config(void *arg) 5963 { 5964 /* Now that interrupts are enabled, go find our devices */ 5965 5966 #ifdef CAMDEBUG 5967 /* Setup debugging flags and path */ 5968 #ifdef CAM_DEBUG_FLAGS 5969 cam_dflags = CAM_DEBUG_FLAGS; 5970 #else /* !CAM_DEBUG_FLAGS */ 5971 cam_dflags = CAM_DEBUG_NONE; 5972 #endif /* CAM_DEBUG_FLAGS */ 5973 #ifdef CAM_DEBUG_BUS 5974 if (cam_dflags != CAM_DEBUG_NONE) { 5975 if (xpt_create_path(&cam_dpath, xpt_periph, 5976 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5977 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5978 printf("xpt_config: xpt_create_path() failed for debug" 5979 " target %d:%d:%d, debugging disabled\n", 5980 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5981 cam_dflags = CAM_DEBUG_NONE; 5982 } 5983 } else 5984 cam_dpath = NULL; 5985 #else /* !CAM_DEBUG_BUS */ 5986 cam_dpath = NULL; 5987 #endif /* CAM_DEBUG_BUS */ 5988 #endif /* CAMDEBUG */ 5989 5990 /* 5991 * Scan all installed busses. 5992 */ 5993 xpt_for_all_busses(xptconfigbuscountfunc, NULL); 5994 5995 if (busses_to_config == 0) { 5996 /* Call manually because we don't have any busses */ 5997 xpt_finishconfig(xpt_periph, NULL); 5998 } else { 5999 if (busses_to_reset > 0 && SCSI_DELAY >= 2000) { 6000 printf("Waiting %d seconds for SCSI " 6001 "devices to settle\n", SCSI_DELAY/1000); 6002 } 6003 xpt_for_all_busses(xptconfigfunc, NULL); 6004 } 6005 } 6006 6007 /* 6008 * If the given device only has one peripheral attached to it, and if that 6009 * peripheral is the passthrough driver, announce it. This insures that the 6010 * user sees some sort of announcement for every peripheral in their system. 6011 */ 6012 static int 6013 xptpassannouncefunc(struct cam_ed *device, void *arg) 6014 { 6015 struct cam_periph *periph; 6016 int i; 6017 6018 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 6019 periph = SLIST_NEXT(periph, periph_links), i++); 6020 6021 periph = SLIST_FIRST(&device->periphs); 6022 if ((i == 1) 6023 && (strncmp(periph->periph_name, "pass", 4) == 0)) 6024 xpt_announce_periph(periph, NULL); 6025 6026 return(1); 6027 } 6028 6029 static void 6030 xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb) 6031 { 6032 struct periph_driver **p_drv; 6033 int i; 6034 6035 if (done_ccb != NULL) { 6036 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 6037 ("xpt_finishconfig\n")); 6038 switch(done_ccb->ccb_h.func_code) { 6039 case XPT_RESET_BUS: 6040 if (done_ccb->ccb_h.status == CAM_REQ_CMP) { 6041 done_ccb->ccb_h.func_code = XPT_SCAN_BUS; 6042 done_ccb->ccb_h.cbfcnp = xpt_finishconfig; 6043 xpt_action(done_ccb); 6044 return; 6045 } 6046 /* FALLTHROUGH */ 6047 case XPT_SCAN_BUS: 6048 default: 6049 xpt_free_path(done_ccb->ccb_h.path); 6050 busses_to_config--; 6051 break; 6052 } 6053 } 6054 6055 if (busses_to_config == 0) { 6056 /* Register all the peripheral drivers */ 6057 /* XXX This will have to change when we have loadable modules */ 6058 p_drv = (struct periph_driver **)periphdriver_set.ls_items; 6059 for (i = 0; p_drv[i] != NULL; i++) { 6060 (*p_drv[i]->init)(); 6061 } 6062 6063 /* 6064 * Check for devices with no "standard" peripheral driver 6065 * attached. For any devices like that, announce the 6066 * passthrough driver so the user will see something. 
/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++)
		;

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return(1);
}

static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct periph_driver **p_drv;
	int i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch (done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
		default:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have loadable modules */
		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
		free(xpt_config_hook, M_TEMP);
		xpt_config_hook = NULL;
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 0;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}
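/*
 * Illustrative sketch (fenced off with #if 0, never compiled): how a
 * hypothetical SIM's action routine might answer XPT_PATH_INQ, for
 * contrast with the stub values xptaction() reports for the transport
 * layer's own bus.  "examplesim", the capability flags, and the limits
 * below are invented for the example and do not describe any real
 * controller.
 */
#if 0
static void
examplesim_action(struct cam_sim *sim, union ccb *work_ccb)
{
	switch (work_ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &work_ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_TAG_ABLE | PI_WIDE_16 | PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;		/* bus resets allowed */
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 15;		/* wide bus: targets 0-15 */
		cpi->max_lun = 7;
		cpi->initiator_id = 7;		/* the adapter's own ID */
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Example", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 3300; /* async/narrow, in KB/s */
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}
#endif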
/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 */

static void
swi_camnet(void)
{
	camisr(&cam_netq);
}

static void
swi_cambio(void)
{
	camisr(&cam_bioq);
}

static void
camisr(cam_isrq_t *queue)
{
	int s;
	struct ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			struct cam_ed *device;
			union ccb *send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(send_ccb->ccb_h.path,
						 /*count*/1, /*runqueue*/TRUE);
			}
		}
		if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path, /*count*/1,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
		 && (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path, /*count*/1,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph,
				 (union ccb *)ccb_h);

		/* Raise IPL for while test */
		s = splcam();
	}
	splx(s);
}
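/*
 * Completion path, for reference (descriptive comment; the two-line
 * snippet is illustrative, not taken from any particular driver).  A SIM
 * normally hands a finished transaction back to the transport layer with
 * something like:
 *
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * xpt_done() (earlier in this file) places the ccb on cam_bioq or
 * cam_netq, whichever matches the request, and requests the
 * corresponding software interrupt.  swi_cambio()/swi_camnet() above
 * then run camisr(), so the queue accounting and the peripheral
 * driver's completion callback (ccb_h->cbfcnp) run in software
 * interrupt context rather than in the adapter's hardware interrupt
 * handler.
 */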