/*
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: cam_xpt.c,v 1.24 1998/10/15 19:08:52 ken Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/md5.h>
#include <sys/devicestat.h>

#ifdef PC98
#include <pc98/pc98/pc98_machdep.h>	/* geometry translation */
#endif

#include <machine/clock.h>
#include <machine/ipl.h>

#include <cam/cam.h>
#include <cam/cam_conf.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>
#include "opt_cam.h"
#include "opt_scsi.h"

extern void (*ihandlers[32]) __P((void));

/* Data structures internal to the xpt layer */

/*
 * Definition of an async handler callback block.  These are used to add
 * SIMs and peripherals to the async callback lists.
 */
struct async_node {
	SLIST_ENTRY(async_node)	links;
	u_int32_t	event_enable;	/* Async Event enables */
	void		(*callback)(void *arg, u_int32_t code,
				    struct cam_path *path, void *args);
	void		*callback_arg;
};

SLIST_HEAD(async_list, async_node);
SLIST_HEAD(periph_list, cam_periph);
STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/*
 * This is the number of milliseconds we wait for devices to settle after a
 * SCSI bus reset.
 */
#ifndef SCSI_DELAY
#define SCSI_DELAY 2000
#endif
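/*
 * Both of the knobs above are normally taken from the kernel config file
 * (via the opt_cam.h/opt_scsi.h headers included earlier).  As a sketch
 * only -- option names as consumed by this file, values purely
 * illustrative -- a config wanting a longer settle delay and more
 * concurrent high power commands might contain:
 *
 *	options SCSI_DELAY=15000	# fifteen seconds, in milliseconds
 *	options CAM_MAX_HIGHPOWER=8
 */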
/*
 * If someone sets this to 0, we assume that they want the minimum
 * allowable bus settle delay.  All devices need _some_ sort of bus settle
 * delay, so we'll set it to a minimum value of 100ms.
 */
#if (SCSI_DELAY == 0)
#undef SCSI_DELAY
#define SCSI_DELAY 100
#endif

/*
 * Make sure the user isn't using seconds instead of milliseconds.
 */
#if (SCSI_DELAY < 100)
#error "SCSI_DELAY is in milliseconds, not seconds!  Please use a larger value"
#endif

/* number of high powered commands that can go through right now */
static int num_highpower = CAM_MAX_HIGHPOWER;

/*
 * Structure for queueing a device in a run queue.
 * There is one run queue for allocating new ccbs,
 * and another for sending ccbs to the controller.
 */
struct cam_ed_qinfo {
	cam_pinfo pinfo;
	struct	  cam_ed *device;
};

/*
 * The CAM EDT (Existing Device Table) contains the device information for
 * all devices for all busses in the system.  The table contains a
 * cam_ed structure for each device on the bus.
 */
struct cam_ed {
	TAILQ_ENTRY(cam_ed) links;
	struct	cam_ed_qinfo alloc_ccb_entry;
	struct	cam_ed_qinfo send_ccb_entry;
	struct	cam_et *target;
	lun_id_t	lun_id;
	struct	camq drvq;		/*
					 * Queue of type drivers wanting to do
					 * work on this device.
					 */
	struct	cam_ccbq ccbq;		/* Queue of pending ccbs */
	struct	async_list asyncs;	/* Async callback info for this B/T/L */
	struct	periph_list periphs;	/* All attached devices */
	u_int	generation;		/* Generation number */
	struct	cam_periph *owner;	/* Peripheral driver's ownership tag */
	struct	xpt_quirk_entry *quirk;	/* Oddities about this device */
					/* Storage for the inquiry data */
	struct	scsi_inquiry_data inq_data;
	u_int8_t	 inq_flags;	/*
					 * Current settings for inquiry flags.
					 * This allows us to override settings
					 * like disconnection and tagged
					 * queuing for a device.
					 */
	u_int8_t	 queue_flags;	/* Queue flags from the control page */
	u_int8_t	*serial_num;
	u_int8_t	 serial_num_len;
	u_int32_t	 qfrozen_cnt;
	u_int32_t	 flags;
#define CAM_DEV_UNCONFIGURED		0x01
#define CAM_DEV_REL_TIMEOUT_PENDING	0x02
#define CAM_DEV_REL_ON_COMPLETE		0x04
#define CAM_DEV_REL_ON_QUEUE_EMPTY	0x08
#define CAM_DEV_RESIZE_QUEUE_NEEDED	0x10
#define CAM_DEV_TAG_AFTER_COUNT		0x20
	u_int32_t	 tag_delay_count;
#define	CAM_TAG_DELAY_COUNT		5
	u_int32_t	 refcount;
	struct		 callout_handle c_handle;
};

/*
 * Each target is represented by an ET (Existing Target).  These
 * entries are created when a target is successfully probed with an
 * identify, and removed when a device fails to respond after a number
 * of retries, or a bus rescan finds the device missing.
 */
struct cam_et {
	TAILQ_HEAD(, cam_ed) ed_entries;
	TAILQ_ENTRY(cam_et) links;
	struct	cam_eb	*bus;
	target_id_t	target_id;
	u_int32_t	refcount;
	u_int		generation;
};

/*
 * Each bus is represented by an EB (Existing Bus).  These entries
 * are created by calls to xpt_bus_register and deleted by calls to
 * xpt_bus_deregister.
 */
struct cam_eb {
	TAILQ_HEAD(, cam_et) et_entries;
	TAILQ_ENTRY(cam_eb)  links;
	struct	async_list   asyncs;	/* Async callback info for this B/T/L */
	path_id_t	     path_id;
	struct	cam_sim	     *sim;
	u_int32_t	     flags;
#define	CAM_EB_RUNQ_SCHEDULED	0x01
	u_int		     generation;
};

struct cam_path {
	struct cam_periph *periph;
	struct cam_eb	  *bus;
	struct cam_et	  *target;
	struct cam_ed	  *device;
};
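#if 0
/*
 * Not compiled in -- an illustrative sketch of how a consumer lands on
 * one of the async_list chains kept in the cam_ed and cam_eb structures
 * above.  A peripheral driver fills out a set-async-callback CCB and
 * hands it to xpt_action(); foo_async and struct foo_softc are
 * hypothetical names.
 */
static void
foo_register_async(struct cam_path *path, struct foo_softc *softc)
{
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = foo_async;
	csa.callback_arg = softc;
	xpt_action((union ccb *)&csa);
}
#endif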
struct xpt_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	u_int8_t quirks;
#define	CAM_QUIRK_NOLUNS	0x01
#define	CAM_QUIRK_NOSERIAL	0x02
	u_int8_t mintags;
	u_int8_t maxtags;
};

typedef enum {
	XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
	xpt_flags	flags;
	u_int32_t	generation;
#ifdef DEVFS
	void		*xpt_devfs_token;
	void		*ctl_devfs_token;
#endif
};

static const char quantum[] = "QUANTUM";
static const char sony[] = "SONY";
static const char west_digital[] = "WDIGTL";

static struct xpt_quirk_entry xpt_quirk_table[] =
{
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Reports QUEUE FULL for temporary resource shortages */
		{ T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
		/*quirks*/0, /*mintags*/24, /*maxtags*/32
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "3391*", "x43h" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Broken tagged queuing drive
		 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
		 *         and: Martin Renters <martin@tdc.on.ca>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "SEAGATE", "ST410800*", "71*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Broken tagged queuing drive */
		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "CONNER", "CFP2107*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Slow when tagged queueing is enabled.  (1.5MB/sec versus
		 * 8MB/sec.)
		 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
		/*quirks*/0, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * Doesn't handle queue full condition correctly,
		 * so we need to limit maxtags to what the device
		 * can handle instead of determining this automatically.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "WN321010S*", "*" },
		/*quirks*/0, /*mintags*/2, /*maxtags*/32
	},
	{
		/*
		 * Hack until multiple-luns are supported by
		 * the target mode code.
		 */
		{
			T_PROCESSOR, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			"FreeBSD", "TM-PT", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Really only one LUN */
		{
			T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* I can't believe we need a quirk for DPT volumes. */
		{
			T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE,
			"DPT", "*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS,
		/*mintags*/0, /*maxtags*/255
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Verified by: Jean-Marc Zucconi <jmz@FreeBSD.ORG>
		 */
		{
			T_CDROM, SIP_MEDIA_REMOVABLE, sony,
			"CD-ROM CDU-80*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * This drive doesn't like multiple LUN probing.
		 * Submitted by: Parag Patel <parag@cgt.com>
		 */
		{
			T_WORM, SIP_MEDIA_REMOVABLE, sony,
			"CD-R CDU9*", "*"
		},
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/*
		 * The 8200 doesn't like multi-lun probing, and probably
		 * doesn't like serial number requests either.
		 */
		{
			T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
			"EXB-8200*", "*"
		},
		CAM_QUIRK_NOSERIAL|CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
	{
		/* Default tagged queuing parameters for all devices */
		{
			T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
			/*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0, /*mintags*/2, /*maxtags*/255
	},
};
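#if 0
	/*
	 * Not compiled in -- a sketch of what a new quirk entry would look
	 * like (vendor/product strings invented for illustration).  A real
	 * entry belongs inside xpt_quirk_table above, before the catch-all
	 * default entry; this one would disable LUN probing and tagged
	 * queueing for the matching drive.
	 */
	{
		{ T_DIRECT, SIP_MEDIA_FIXED, "ACME", "TURBODISK*", "*" },
		CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
	},
#endif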
typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;
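/*
 * The low nibble of a dev_match_ret holds flags (currently just
 * DM_RET_COPY) and the high nibble holds an action.  Worked example:
 * 0x21 == (DM_RET_DESCEND | DM_RET_COPY) tells the caller to copy the
 * current node out and keep descending the EDT, while plain DM_RET_STOP
 * (0x10) ends the descent without copying anything.
 */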
typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
static cam_isrq_t cam_bioq;
static cam_isrq_t cam_netq;

/* "Pool" of inactive ccbs managed by xpt_alloc_ccb and xpt_free_ccb */
SLIST_HEAD(,ccb_hdr) ccb_freeq;
static u_int xpt_max_ccbs;	/*
				 * Maximum size of ccb pool.  Modified as
				 * devices are added/removed or have their
				 * opening counts changed.
				 */
static u_int xpt_ccb_count;	/* Current count of allocated ccbs */

static struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static periph_init_t probe_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units)
};

static struct periph_driver probe_driver =
{
	probe_periph_init, "probe",
	TAILQ_HEAD_INITIALIZER(probe_driver.units)
};

DATA_SET(periphdriver_set, xpt_driver);
DATA_SET(periphdriver_set, probe_driver);

#define XPT_CDEV_MAJOR 104

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw =
{
	/*d_open*/	xptopen,
	/*d_close*/	xptclose,
	/*d_read*/	noread,
	/*d_write*/	nowrite,
	/*d_ioctl*/	xptioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	NULL,
	/*d_mmap*/	nommap,
	/*d_strategy*/	nostrategy,
	/*d_name*/	"xpt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct intr_config_hook *xpt_config_hook;

/* Registered busses */
TAILQ_HEAD(,cam_eb) xpt_busses;
static u_int bus_generation;

/* Storage for debugging data structures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
u_int32_t cam_dflags;
#endif

#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
#endif

/*
 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
 * enabled.  Also, the user must have either none, or all of CAM_DEBUG_BUS,
 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
 */
#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
    || defined(CAM_DEBUG_LUN)
#ifdef CAMDEBUG
#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
    || !defined(CAM_DEBUG_LUN)
#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
and CAM_DEBUG_LUN"
#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
#else /* !CAMDEBUG */
#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
#endif /* CAMDEBUG */
#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
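/*
 * Userland reaches the transport layer through the character device
 * defined above.  Roughly -- a sketch; a utility like camcontrol(8)
 * does this, error handling omitted:
 *
 *	fd = open("/dev/xpt0", O_RDWR);		read-write, blocking
 *						(see xptopen() below)
 *	ioctl(fd, CAMIOCOMMAND, &ccb);		or CAMGETPASSTHRU
 */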
/* Forward declarations for private functions */
void	xpt_init(void);

static cam_status	xpt_compile_path(struct cam_path *new_path,
					 struct cam_periph *perph,
					 path_id_t path_id,
					 target_id_t target_id,
					 lun_id_t lun_id);

static void	xpt_release_path(struct cam_path *path);

static void	xpt_async_bcast(struct async_list *async_head,
				u_int32_t async_code,
				struct cam_path *path,
				void *async_arg);
static int	xptnextfreebus(path_id_t startbus);
static int	xptpathid(const char *sim_name, int sim_unit, int sim_bus,
			  path_id_t *nextpath);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static int	xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				 u_int32_t new_priority);
static void	xpt_run_dev_allocq(struct cam_eb *bus);
static void	xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static timeout_t xpt_release_simq_timeout;
static struct cam_et*
		xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	xpt_release_target(struct cam_eb *bus, struct cam_et *target);
static struct cam_ed*
		xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
				 lun_id_t lun_id);
static void	xpt_release_device(struct cam_eb *bus, struct cam_et *target,
				   struct cam_ed *device);
static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
static struct cam_eb*
		xpt_find_bus(path_id_t path_id);
static struct cam_et*
		xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
static void	xpt_scan_lun(struct cam_periph *periph,
			     struct cam_path *path, cam_flags flags,
			     union ccb *ccb);
static void	xptscandone(struct cam_periph *periph, union ccb *done_ccb);
static xpt_busfunc_t	xptconfigbuscountfunc;
static xpt_busfunc_t	xptconfigfunc;
static void	xpt_config(void *arg);
static xpt_devicefunc_t xptfinishconfigfunc;
static xpt_devicefunc_t xptpassannouncefunc;
static void	xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
static void	xptaction(struct cam_sim *sim, union ccb *work_ccb);
void	swi_camnet(void);
void	swi_cambio(void);
static void	camisr(cam_isrq_t *queue);
#if 0
static void	xptstart(struct cam_periph *periph, union ccb *work_ccb);
static void	xptasync(struct cam_periph *periph,
			 u_int32_t code, cam_path *path);
#endif
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int	xptedtmatch(struct ccb_dev_match *cdm);
static int	xptperiphlistmatch(struct ccb_dev_match *cdm);
static int	xptbustraverse(struct cam_eb *start_bus,
			       xpt_busfunc_t *tr_func, void *arg);
static int	xpttargettraverse(struct cam_eb *bus,
				  struct cam_et *start_target,
				  xpt_targetfunc_t *tr_func, void *arg);
static int	xptdevicetraverse(struct cam_et *target,
				  struct cam_ed *start_device,
				  xpt_devicefunc_t *tr_func, void *arg);
static int	xptperiphtraverse(struct cam_ed *device,
				  struct cam_periph *start_periph,
				  xpt_periphfunc_t *tr_func, void *arg);
static int	xptpdrvtraverse(struct periph_driver **start_pdrv,
				xpt_pdrvfunc_t *tr_func, void *arg);
static int	xptpdperiphtraverse(struct periph_driver **pdrv,
				    struct cam_periph *start_periph,
				    xpt_periphfunc_t *tr_func,
				    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static int	xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
static int	xpt_for_all_targets(xpt_targetfunc_t *tr_func,
				    void *arg);
static int	xpt_for_all_devices(xpt_devicefunc_t *tr_func,
				    void *arg);
static int	xpt_for_all_periphs(xpt_periphfunc_t *tr_func,
				    void *arg);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static cam_status	proberegister(struct cam_periph *periph,
				      void *arg);
static void	probeschedule(struct cam_periph *probe_periph);
static void	probestart(struct cam_periph *periph, union ccb *start_ccb);
static void	probedone(struct cam_periph *periph, union ccb *done_ccb);
static void	probecleanup(struct cam_periph *periph);
static void	xpt_find_quirk(struct cam_ed *device);
static void	xpt_set_transfer_settings(struct ccb_trans_settings *cts,
					  struct cam_ed *device,
					  int async_update);
static void	xpt_toggle_tags(struct cam_path *path);
static void	xpt_start_tags(struct cam_path *path);
static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
					    struct cam_ed *dev);
static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
					   struct cam_ed *dev);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
static __inline int dev_allocq_is_runnable(struct cam_devq *devq);

static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.devq_openings > 0) {
		if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
			cam_ccbq_resize(&dev->ccbq,
					dev->ccbq.dev_openings
					+ dev->ccbq.dev_active);
			dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
		}
		retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
					  &dev->alloc_ccb_entry.pinfo,
					  dev->drvq.queue_array[0]->priority);
	} else {
		retval = 0;
	}

	return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
	int	retval;

	if (dev->ccbq.dev_openings > 0) {
		retval = xpt_schedule_dev(&bus->sim->devq->send_queue,
					  &dev->send_ccb_entry.pinfo,
					  dev->ccbq.queue.queue_array[0]->priority);
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
	return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
	return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
	return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
dev_allocq_is_runnable(struct cam_devq *devq)
{
	/*
	 * Have work to do.
	 * Have space to do more work.
	 * Allowed to do work.
	 */
	return ((devq->alloc_queue.qfrozen_cnt == 0)
	     && (devq->alloc_queue.entries > 0)
	     && (devq->alloc_openings > 0));
}
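/*
 * A note on the two run queues handled by the inline schedulers above
 * (summarizing behavior implemented elsewhere in this file): a device
 * first competes on its bus's alloc_queue for a CCB from the pool, at
 * the priority of the highest-priority peripheral in its drvq; once it
 * has work to issue, it competes on the send_queue for one of its
 * device openings so the CCB can be passed down to the SIM.
 */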
static void
xpt_periph_init()
{
	dev_t dev;

	dev = makedev(XPT_CDEV_MAJOR, 0);
	cdevsw_add(&dev, &xpt_cdevsw, NULL);
}

static void
probe_periph_init()
{
}


static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("xpt%d: can't do nonblocking access\n", unit);
		return(ENODEV);
	}

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptopen: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves open */
	xsoftc.flags |= XPT_FLAG_OPEN;

	return(0);
}

static int
xptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	int unit;

	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptclose: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	/* Mark ourselves closed */
	xsoftc.flags &= ~XPT_FLAG_OPEN;

	return(0);
}

static int
xptioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	int unit, error;

	error = 0;
	unit = minor(dev) & 0xff;

	/*
	 * We only have one transport layer right now.  If someone accesses
	 * us via something other than minor number 0, point out their
	 * mistake.
	 */
	if (unit != 0) {
		printf("xptioctl: got invalid xpt unit %d\n", unit);
		return(ENXIO);
	}

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;

		inccb = (union ccb *)addr;

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
			 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
				error = EINVAL;
				break;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_LUN:
		case XPT_ENG_INQ:	/* XXX not implemented yet */
		case XPT_ENG_EXEC:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			ccb->ccb_h.cbfcnp = xptdone;
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			ccb.ccb_h.cbfcnp = xptdone;
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
				error = EINVAL;
				break;
			}
			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo);

			if (error)
				break;

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			error = 0;
			break;
		}
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.  Note
	 * that since we do everything in one pass, unlike the XPT_GDEVLIST
	 * ccb, we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with splsoftcam protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		int unit;
		int cur_generation;
		int base_periph_found;
		int splbreaknum;
		int s;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		/*
		 * Every 100 devices, we want to drop our spl protection to
		 * give the software interrupt handler a chance to run.
		 * Most systems won't run into this check, but this should
		 * avoid starvation in the software interrupt handler in
		 * large systems.
		 */
		splbreaknum = 100;

		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		s = splsoftcam();
ptstartover:
		cur_generation = xsoftc.generation;

		/* first find our driver in the list of drivers */
		for (p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		     *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			splx(s);
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit) {
				break;
			} else if (--splbreaknum == 0) {
				splx(s);
				s = splsoftcam();
				splbreaknum = 100;
				if (cur_generation != xsoftc.generation)
					goto ptstartover;
			}
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = device->periphs.slh_first;
			     periph != NULL;
			     periph = periph->periph_links.sle_next, i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (periph->periph_links.sle_next)
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}
		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass0\" in "
				       "your kernel config file\n");
			}
		}
		splx(s);
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}
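#if 0
/*
 * Not compiled in -- a sketch of a (hypothetical) userland caller of the
 * CAMGETPASSTHRU ioctl handled above: ask which passthrough instance
 * sits on the same device as da1.  Field usage mirrors the handler;
 * error handling and the open of fd are omitted.
 */
	union ccb ccb;

	bzero(&ccb, sizeof(ccb));
	strcpy(ccb.cgdl.periph_name, "da");
	ccb.cgdl.unit_number = 1;
	if (ioctl(fd, CAMGETPASSTHRU, &ccb) == 0
	 && ccb.ccb_h.status == CAM_REQ_CMP)
		printf("passthrough device is %s%d\n", ccb.cgdl.periph_name,
		       ccb.cgdl.unit_number);
#endif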
/* Functions accessed by the peripheral drivers */
void
xpt_init()
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	cam_status status;

	TAILQ_INIT(&xpt_busses);
	TAILQ_INIT(&cam_bioq);
	TAILQ_INIT(&cam_netq);
	SLIST_INIT(&ccb_freeq);
	STAILQ_INIT(&highpowerq);

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	xpt_sim = (struct cam_sim *)malloc(sizeof(*xpt_sim),
					   M_DEVBUF, M_WAITOK);
	xpt_sim->sim_action = xptaction;
	xpt_sim->sim_name = "xpt";
	xpt_sim->path_id = CAM_XPT_PATH_ID;
	xpt_sim->bus_id = 0;
	xpt_sim->max_tagged_dev_openings = 0;
	xpt_sim->max_dev_openings = 0;
	xpt_sim->devq = cam_simq_alloc(16);
	xpt_max_ccbs = 16;

	xpt_bus_register(xpt_sim, 0);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return;
	}

	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, NULL);
	xpt_free_path(path);

	xpt_sim->softc = xpt_periph;

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT);
	if (xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return;
	}
	bzero(xpt_config_hook, sizeof(*xpt_config_hook));

	xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xpt_config_hook) != 0) {
		free(xpt_config_hook, M_TEMP);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	/* Install our software interrupt handlers */
	/* XXX Should call some MI function to do this */
#ifdef __i386__
	ihandlers[SWI_CAMNET] = swi_camnet;
	ihandlers[SWI_CAMBIO] = swi_cambio;
#endif
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	periph->softc = NULL;

	xpt_periph = periph;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	status;
	struct periph_list *periph_head;

	device = periph->path->device;

	status = CAM_REQ_CMP;

	if (device != NULL) {
		int s;

		periph_head = &device->periphs;

		/*
		 * Make room for this peripheral
		 * so it will fit in the queue
		 * when it's scheduled to run
		 */
		s = splsoftcam();
		status = camq_resize(&device->drvq,
				     device->drvq.array_size + 1);

		device->generation++;

		SLIST_INSERT_HEAD(periph_head, periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

	return (status);
}
void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;

	if (device != NULL) {
		int s;
		struct periph_list *periph_head;

		periph_head = &device->periphs;

		/* Release the slot for this peripheral */
		s = splsoftcam();
		camq_resize(&device->drvq, device->drvq.array_size - 1);

		device->generation++;

		SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);

		splx(s);
	}

	xsoftc.generation++;

}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	int s;
	u_int mb;
	struct cam_path *path;
	struct ccb_trans_settings cts;

	path = periph->path;
	/*
	 * To ensure that this is printed in one piece,
	 * mask out CAM interrupts.
	 */
	s = splsoftcam();
	printf("%s%d at %s%d bus %d target %d lun %d\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->target->target_id,
	       path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	scsi_print_inquiry(&path->device->inq_data);
	if ((bootverbose)
	 && (path->device->serial_num_len > 0)) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.flags = CCB_TRANS_CURRENT_SETTINGS;
	xpt_action((union ccb*)&cts);
	if (cts.ccb_h.status == CAM_REQ_CMP) {
		u_int speed;
		u_int freq;

		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			freq = scsi_calc_syncsrate(cts.sync_period);
			speed = freq;
		} else {
			freq = 0;
			speed = path->bus->sim->base_transfer_speed;
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0)
			speed *= (0x01 << cts.bus_width);
		mb = speed / 1000;
		if (mb > 0)
			printf("%s%d: %d.%03dMB/s transfers",
			       periph->periph_name, periph->unit_number,
			       mb, speed % 1000);
		else
			printf("%s%d: %dKB/s transfers", periph->periph_name,
			       periph->unit_number, speed);
		if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
		 && cts.sync_offset != 0) {
			printf(" (%d.%03dMHz, offset %d", freq / 1000,
			       freq % 1000, cts.sync_offset);
		}
		if ((cts.valid & CCB_TRANS_BUS_WIDTH_VALID) != 0
		 && cts.bus_width > 0) {
			if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			 && cts.sync_offset != 0) {
				printf(", ");
			} else {
				printf(" (");
			}
			printf("%dbit)", 8 * (0x01 << cts.bus_width));
		} else if ((cts.valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0
			&& cts.sync_offset != 0) {
			printf(")");
		}

		if (path->device->inq_flags & SID_CmdQue
		 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
			printf(", Tagged Queueing Enabled");
		}

		printf("\n");
	} else if (path->device->inq_flags & SID_CmdQue
		|| path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Tagged Queueing Enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * We only want to print the caller's announce string if they've
	 * passed one in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
	splx(s);
}
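/*
 * Worked example for the transfer speed arithmetic above (values
 * illustrative): a 100ns sync period gives freq = 10000 (10MHz, in KHz
 * units), so speed starts at 10000KB/s; a 16-bit bus (bus_width == 1)
 * doubles that to 20000, mb becomes 20, and the announcement prints as
 * "20.000MB/s transfers (10.000MHz, offset N, 16bit)".
 */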
static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them this
		 * bus node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);
	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->inq_pat,
				    1, sizeof(cur_pattern->inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}
/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}
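#if 0
/*
 * Not compiled in -- a sketch of what feeds the three match routines
 * above: a hypothetical caller building the pattern and result buffers
 * for an XPT_DEV_MATCH CCB that matches every peripheral named "da".
 * Only fields these routines are seen to consume are filled in;
 * match_buf_len appears in the code below, while pattern_buf_len is
 * assumed to be its input-side counterpart.
 */
	struct dev_match_pattern pattern;
	struct dev_match_result matches[32];
	union ccb ccb;		/* header setup omitted */

	pattern.type = DEV_MATCH_PERIPH;
	bzero(&pattern.pattern.periph_pattern,
	      sizeof(pattern.pattern.periph_pattern));
	pattern.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
	strcpy(pattern.pattern.periph_pattern.periph_name, "da");

	ccb.cdm.num_patterns = 1;
	ccb.cdm.pattern_buf_len = sizeof(pattern);
	ccb.cdm.patterns = &pattern;
	ccb.cdm.num_matches = 0;
	ccb.cdm.match_buf_len = sizeof(matches);
	ccb.cdm.matches = matches;
#endif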
static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION] =
				bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (bus == cdm->pos.cookie.bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
	     bus->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		return(xpttargettraverse(bus,
					(struct cam_et *)cdm->pos.cookie.target,
					 xptedttargetfunc, arg));
	else
		return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}
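/*
 * The position/generation handling above is what makes a match
 * restartable: when the result buffer fills, the cookie for each level
 * walked so far plus the relevant generation counts are saved in
 * cdm->pos and CAM_DEV_MATCH_MORE is returned to the caller.  A
 * subsequent call resumes the traversal at the saved cookies, unless a
 * generation number has moved in the meantime, in which case
 * CAM_DEV_MATCH_LIST_CHANGED tells the caller to start over from
 * scratch.
 */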
static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
	 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
	     target->generation)) {
		cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
		return(0);
	}

	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == target->bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL))
		return(xptdevicetraverse(target,
					(struct cam_ed *)cdm->pos.cookie.device,
					 xptedtdevicefunc, arg));
	else
		return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{

	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION] =
				bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
	}
1918 */ 1919 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1920 return(1); 1921 1922 /* 1923 * If there is a peripheral list generation recorded, make sure 1924 * it hasn't changed. 1925 */ 1926 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1927 && (device->target->bus == cdm->pos.cookie.bus) 1928 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1929 && (device->target == cdm->pos.cookie.target) 1930 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1931 && (device == cdm->pos.cookie.device) 1932 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1933 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 1934 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1935 device->generation)){ 1936 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1937 return(0); 1938 } 1939 1940 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1941 && (cdm->pos.cookie.bus == device->target->bus) 1942 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1943 && (cdm->pos.cookie.target == device->target) 1944 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1945 && (cdm->pos.cookie.device == device) 1946 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1947 && (cdm->pos.cookie.periph != NULL)) 1948 return(xptperiphtraverse(device, 1949 (struct cam_periph *)cdm->pos.cookie.periph, 1950 xptedtperiphfunc, arg)); 1951 else 1952 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg)); 1953 } 1954 1955 static int 1956 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1957 { 1958 struct ccb_dev_match *cdm; 1959 dev_match_ret retval; 1960 1961 cdm = (struct ccb_dev_match *)arg; 1962 1963 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1964 1965 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1966 cdm->status = CAM_DEV_MATCH_ERROR; 1967 return(0); 1968 } 1969 1970 /* 1971 * If the copy flag is set, copy this peripheral out. 1972 */ 1973 if (retval & DM_RET_COPY) { 1974 int spaceleft, j; 1975 1976 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1977 sizeof(struct dev_match_result)); 1978 1979 /* 1980 * If we don't have enough space to put in another 1981 * match result, save our position and tell the 1982 * user there are more devices to check. 
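* (The position we save records a cookie and a generation count for every level of the tree down to this peripheral, so the next XPT_DEV_MATCH call can resume the traversal exactly where this one ran out of buffer space.)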
1983 */ 1984 if (spaceleft < sizeof(struct dev_match_result)) { 1985 bzero(&cdm->pos, sizeof(cdm->pos)); 1986 cdm->pos.position_type = 1987 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1988 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1989 CAM_DEV_POS_PERIPH; 1990 1991 cdm->pos.cookie.bus = periph->path->bus; 1992 cdm->pos.generations[CAM_BUS_GENERATION]= 1993 bus_generation; 1994 cdm->pos.cookie.target = periph->path->target; 1995 cdm->pos.generations[CAM_TARGET_GENERATION] = 1996 periph->path->bus->generation; 1997 cdm->pos.cookie.device = periph->path->device; 1998 cdm->pos.generations[CAM_DEV_GENERATION] = 1999 periph->path->target->generation; 2000 cdm->pos.cookie.periph = periph; 2001 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2002 periph->path->device->generation; 2003 cdm->status = CAM_DEV_MATCH_MORE; 2004 return(0); 2005 } 2006 2007 j = cdm->num_matches; 2008 cdm->num_matches++; 2009 cdm->matches[j].type = DEV_MATCH_PERIPH; 2010 cdm->matches[j].result.periph_result.path_id = 2011 periph->path->bus->path_id; 2012 cdm->matches[j].result.periph_result.target_id = 2013 periph->path->target->target_id; 2014 cdm->matches[j].result.periph_result.target_lun = 2015 periph->path->device->lun_id; 2016 cdm->matches[j].result.periph_result.unit_number = 2017 periph->unit_number; 2018 strncpy(cdm->matches[j].result.periph_result.periph_name, 2019 periph->periph_name, DEV_IDLEN); 2020 } 2021 2022 return(1); 2023 } 2024 2025 static int 2026 xptedtmatch(struct ccb_dev_match *cdm) 2027 { 2028 int ret; 2029 2030 cdm->num_matches = 0; 2031 2032 /* 2033 * Check the bus list generation. If it has changed, the user 2034 * needs to reset everything and start over. 2035 */ 2036 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2037 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0) 2038 && (cdm->pos.generations[CAM_BUS_GENERATION] != bus_generation)) { 2039 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2040 return(0); 2041 } 2042 2043 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 2044 && (cdm->pos.cookie.bus != NULL)) 2045 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus, 2046 xptedtbusfunc, cdm); 2047 else 2048 ret = xptbustraverse(NULL, xptedtbusfunc, cdm); 2049 2050 /* 2051 * If we get back 0, that means that we had to stop before fully 2052 * traversing the EDT. It also means that one of the subroutines 2053 * has set the status field to the proper value. If we get back 1, 2054 * we've fully traversed the EDT and copied out any matching entries. 
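 *
 * A minimal sketch of the resume protocol from the consumer's side
 * (hypothetical; a real consumer such as camcontrol(8) reaches us through
 * the xpt(4) device rather than calling this function directly, and
 * submit_dev_match_ccb() and consume() are stand-ins for that plumbing):
 *
 *	bzero(&cdm.pos, sizeof(cdm.pos));
 *	do {
 *		submit_dev_match_ccb(&cdm);
 *		consume(cdm.matches, cdm.num_matches);
 *	} while (cdm.status == CAM_DEV_MATCH_MORE);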
2055 */ 2056 if (ret == 1) 2057 cdm->status = CAM_DEV_MATCH_LAST; 2058 2059 return(ret); 2060 } 2061 2062 static int 2063 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 2064 { 2065 struct ccb_dev_match *cdm; 2066 2067 cdm = (struct ccb_dev_match *)arg; 2068 2069 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2070 && (cdm->pos.cookie.pdrv == pdrv) 2071 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2072 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0) 2073 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 2074 (*pdrv)->generation)) { 2075 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 2076 return(0); 2077 } 2078 2079 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2080 && (cdm->pos.cookie.pdrv == pdrv) 2081 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 2082 && (cdm->pos.cookie.periph != NULL)) 2083 return(xptpdperiphtraverse(pdrv, 2084 (struct cam_periph *)cdm->pos.cookie.periph, 2085 xptplistperiphfunc, arg)); 2086 else 2087 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg)); 2088 } 2089 2090 static int 2091 xptplistperiphfunc(struct cam_periph *periph, void *arg) 2092 { 2093 struct ccb_dev_match *cdm; 2094 dev_match_ret retval; 2095 2096 cdm = (struct ccb_dev_match *)arg; 2097 2098 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 2099 2100 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 2101 cdm->status = CAM_DEV_MATCH_ERROR; 2102 return(0); 2103 } 2104 2105 /* 2106 * If the copy flag is set, copy this peripheral out. 2107 */ 2108 if (retval & DM_RET_COPY) { 2109 int spaceleft, j; 2110 2111 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2112 sizeof(struct dev_match_result)); 2113 2114 /* 2115 * If we don't have enough space to put in another 2116 * match result, save our position and tell the 2117 * user there are more devices to check. 2118 */ 2119 if (spaceleft < sizeof(struct dev_match_result)) { 2120 struct periph_driver **pdrv; 2121 2122 pdrv = NULL; 2123 bzero(&cdm->pos, sizeof(cdm->pos)); 2124 cdm->pos.position_type = 2125 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2126 CAM_DEV_POS_PERIPH; 2127 2128 /* 2129 * This may look a bit nonsensical, but it is 2130 * actually quite logical. There are very few 2131 * peripheral drivers, and bloating every peripheral 2132 * structure with a pointer back to its parent 2133 * peripheral driver linker set entry would cost 2134 * more in the long run than doing this quick lookup. 2135 */ 2136 for (pdrv = 2137 (struct periph_driver **)periphdriver_set.ls_items; 2138 *pdrv != NULL; pdrv++) { 2139 if (strcmp((*pdrv)->driver_name, 2140 periph->periph_name) == 0) 2141 break; 2142 } 2143 2144 if (*pdrv == NULL) { 2145 cdm->status = CAM_DEV_MATCH_ERROR; 2146 return(0); 2147 } 2148 2149 cdm->pos.cookie.pdrv = pdrv; 2150 /* 2151 * The periph generation slot does double duty, as 2152 * does the periph pointer slot. They are used for 2153 * both edt and pdrv lookups and positioning. 2154 */ 2155 cdm->pos.cookie.periph = periph; 2156 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2157 (*pdrv)->generation; 2158 cdm->status = CAM_DEV_MATCH_MORE; 2159 return(0); 2160 } 2161 2162 j = cdm->num_matches; 2163 cdm->num_matches++; 2164 cdm->matches[j].type = DEV_MATCH_PERIPH; 2165 cdm->matches[j].result.periph_result.path_id = 2166 periph->path->bus->path_id; 2167 2168 /* 2169 * The transport layer peripheral doesn't have a target or 2170 * lun.
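* (The xpt peripheral sits on a wildcard path, so path->target and path->device may be NULL here; in that case we report -1 for both the target id and the lun.)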
2171 */ 2172 if (periph->path->target) 2173 cdm->matches[j].result.periph_result.target_id = 2174 periph->path->target->target_id; 2175 else 2176 cdm->matches[j].result.periph_result.target_id = -1; 2177 2178 if (periph->path->device) 2179 cdm->matches[j].result.periph_result.target_lun = 2180 periph->path->device->lun_id; 2181 else 2182 cdm->matches[j].result.periph_result.target_lun = -1; 2183 2184 cdm->matches[j].result.periph_result.unit_number = 2185 periph->unit_number; 2186 strncpy(cdm->matches[j].result.periph_result.periph_name, 2187 periph->periph_name, DEV_IDLEN); 2188 } 2189 2190 return(1); 2191 } 2192 2193 static int 2194 xptperiphlistmatch(struct ccb_dev_match *cdm) 2195 { 2196 int ret; 2197 2198 cdm->num_matches = 0; 2199 2200 /* 2201 * At this point in the edt traversal function, we check the bus 2202 * list generation to make sure that no busses have been added or 2203 * removed since the user last sent an XPT_DEV_MATCH ccb through. 2204 * For the peripheral driver list traversal function, however, we 2205 * don't have to worry about new peripheral driver types coming or 2206 * going; they're in a linker set, and therefore can't change 2207 * without a recompile. 2208 */ 2209 2210 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2211 && (cdm->pos.cookie.pdrv != NULL)) 2212 ret = xptpdrvtraverse( 2213 (struct periph_driver **)cdm->pos.cookie.pdrv, 2214 xptplistpdrvfunc, cdm); 2215 else 2216 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2217 2218 /* 2219 * If we get back 0, that means that we had to stop before fully 2220 * traversing the peripheral driver tree. It also means that one of 2221 * the subroutines has set the status field to the proper value. If 2222 * we get back 1, we've fully traversed the peripheral driver list 2223 * and copied out any matching entries. 2224 */ 2225 if (ret == 1) 2226 cdm->status = CAM_DEV_MATCH_LAST; 2227 2228 return(ret); 2229 } 2230 2231 static int 2232 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2233 { 2234 struct cam_eb *bus, *next_bus; 2235 int retval; 2236 2237 retval = 1; 2238 2239 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xpt_busses)); 2240 bus != NULL; 2241 bus = next_bus) { 2242 next_bus = TAILQ_NEXT(bus, links); 2243 2244 retval = tr_func(bus, arg); 2245 if (retval == 0) 2246 return(retval); 2247 } 2248 2249 return(retval); 2250 } 2251 2252 static int 2253 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2254 xpt_targetfunc_t *tr_func, void *arg) 2255 { 2256 struct cam_et *target, *next_target; 2257 int retval; 2258 2259 retval = 1; 2260 for (target = (start_target ? start_target : 2261 TAILQ_FIRST(&bus->et_entries)); 2262 target != NULL; target = next_target) { 2263 2264 next_target = TAILQ_NEXT(target, links); 2265 2266 retval = tr_func(target, arg); 2267 2268 if (retval == 0) 2269 return(retval); 2270 } 2271 2272 return(retval); 2273 } 2274 2275 static int 2276 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2277 xpt_devicefunc_t *tr_func, void *arg) 2278 { 2279 struct cam_ed *device, *next_device; 2280 int retval; 2281 2282 retval = 1; 2283 for (device = (start_device ?
start_device : 2284 TAILQ_FIRST(&target->ed_entries)); 2285 device != NULL; 2286 device = next_device) { 2287 2288 next_device = TAILQ_NEXT(device, links); 2289 2290 retval = tr_func(device, arg); 2291 2292 if (retval == 0) 2293 return(retval); 2294 } 2295 2296 return(retval); 2297 } 2298 2299 static int 2300 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2301 xpt_periphfunc_t *tr_func, void *arg) 2302 { 2303 struct cam_periph *periph, *next_periph; 2304 int retval; 2305 2306 retval = 1; 2307 2308 for (periph = (start_periph ? start_periph : 2309 SLIST_FIRST(&device->periphs)); 2310 periph != NULL; 2311 periph = next_periph) { 2312 2313 next_periph = SLIST_NEXT(periph, periph_links); 2314 2315 retval = tr_func(periph, arg); 2316 if (retval == 0) 2317 return(retval); 2318 } 2319 2320 return(retval); 2321 } 2322 2323 static int 2324 xptpdrvtraverse(struct periph_driver **start_pdrv, 2325 xpt_pdrvfunc_t *tr_func, void *arg) 2326 { 2327 struct periph_driver **pdrv; 2328 int retval; 2329 2330 retval = 1; 2331 2332 /* 2333 * We don't traverse the peripheral driver list like we do the 2334 * other lists, because it is a linker set, and therefore cannot be 2335 * changed during runtime. If the peripheral driver list is ever 2336 * re-done to be something other than a linker set (i.e. it can 2337 * change while the system is running), the list traversal should 2338 * be modified to work like the other traversal functions. 2339 */ 2340 for (pdrv = (start_pdrv ? start_pdrv : 2341 (struct periph_driver **)periphdriver_set.ls_items); 2342 *pdrv != NULL; pdrv++) { 2343 retval = tr_func(pdrv, arg); 2344 2345 if (retval == 0) 2346 return(retval); 2347 } 2348 2349 return(retval); 2350 } 2351 2352 static int 2353 xptpdperiphtraverse(struct periph_driver **pdrv, 2354 struct cam_periph *start_periph, 2355 xpt_periphfunc_t *tr_func, void *arg) 2356 { 2357 struct cam_periph *periph, *next_periph; 2358 int retval; 2359 2360 retval = 1; 2361 2362 for (periph = (start_periph ? 
start_periph : 2363 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2364 periph = next_periph) { 2365 2366 next_periph = TAILQ_NEXT(periph, unit_links); 2367 2368 retval = tr_func(periph, arg); 2369 if (retval == 0) 2370 return(retval); 2371 } 2372 return(retval); 2373 } 2374 2375 static int 2376 xptdefbusfunc(struct cam_eb *bus, void *arg) 2377 { 2378 struct xpt_traverse_config *tr_config; 2379 2380 tr_config = (struct xpt_traverse_config *)arg; 2381 2382 if (tr_config->depth == XPT_DEPTH_BUS) { 2383 xpt_busfunc_t *tr_func; 2384 2385 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2386 2387 return(tr_func(bus, tr_config->tr_arg)); 2388 } else 2389 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2390 } 2391 2392 static int 2393 xptdeftargetfunc(struct cam_et *target, void *arg) 2394 { 2395 struct xpt_traverse_config *tr_config; 2396 2397 tr_config = (struct xpt_traverse_config *)arg; 2398 2399 if (tr_config->depth == XPT_DEPTH_TARGET) { 2400 xpt_targetfunc_t *tr_func; 2401 2402 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2403 2404 return(tr_func(target, tr_config->tr_arg)); 2405 } else 2406 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2407 } 2408 2409 static int 2410 xptdefdevicefunc(struct cam_ed *device, void *arg) 2411 { 2412 struct xpt_traverse_config *tr_config; 2413 2414 tr_config = (struct xpt_traverse_config *)arg; 2415 2416 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2417 xpt_devicefunc_t *tr_func; 2418 2419 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2420 2421 return(tr_func(device, tr_config->tr_arg)); 2422 } else 2423 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2424 } 2425 2426 static int 2427 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2428 { 2429 struct xpt_traverse_config *tr_config; 2430 xpt_periphfunc_t *tr_func; 2431 2432 tr_config = (struct xpt_traverse_config *)arg; 2433 2434 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2435 2436 /* 2437 * Unlike the other default functions, we don't check for depth 2438 * here. The peripheral driver level is the last level in the EDT, 2439 * so if we're here, we should execute the function in question. 2440 */ 2441 return(tr_func(periph, tr_config->tr_arg)); 2442 } 2443 2444 /* 2445 * Execute the given function for every bus in the EDT. 2446 */ 2447 static int 2448 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2449 { 2450 struct xpt_traverse_config tr_config; 2451 2452 tr_config.depth = XPT_DEPTH_BUS; 2453 tr_config.tr_func = tr_func; 2454 tr_config.tr_arg = arg; 2455 2456 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2457 } 2458 2459 /* 2460 * Execute the given function for every target in the EDT. 2461 */ 2462 static int 2463 xpt_for_all_targets(xpt_targetfunc_t *tr_func, void *arg) 2464 { 2465 struct xpt_traverse_config tr_config; 2466 2467 tr_config.depth = XPT_DEPTH_TARGET; 2468 tr_config.tr_func = tr_func; 2469 tr_config.tr_arg = arg; 2470 2471 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2472 } 2473 2474 /* 2475 * Execute the given function for every device in the EDT. 2476 */ 2477 static int 2478 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2479 { 2480 struct xpt_traverse_config tr_config; 2481 2482 tr_config.depth = XPT_DEPTH_DEVICE; 2483 tr_config.tr_func = tr_func; 2484 tr_config.tr_arg = arg; 2485 2486 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2487 } 2488 2489 /* 2490 * Execute the given function for every peripheral in the EDT. 
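* (Each xpt_for_all_* helper reuses xptbustraverse() with a depth-tagged xpt_traverse_config; the xptdef*func trampolines keep descending level by level until the configured depth is reached and only then invoke the caller's function.)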
2491 */ 2492 static int 2493 xpt_for_all_periphs(xpt_periphfunc_t *tr_func, void *arg) 2494 { 2495 struct xpt_traverse_config tr_config; 2496 2497 tr_config.depth = XPT_DEPTH_PERIPH; 2498 tr_config.tr_func = tr_func; 2499 tr_config.tr_arg = arg; 2500 2501 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2502 } 2503 2504 static int 2505 xptsetasyncfunc(struct cam_ed *device, void *arg) 2506 { 2507 struct cam_path path; 2508 struct ccb_getdev cgd; 2509 struct async_node *cur_entry; 2510 2511 cur_entry = (struct async_node *)arg; 2512 2513 xpt_compile_path(&path, 2514 NULL, 2515 device->target->bus->path_id, 2516 device->target->target_id, 2517 device->lun_id); 2518 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1); 2519 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2520 xpt_action((union ccb *)&cgd); 2521 cur_entry->callback(cur_entry->callback_arg, 2522 AC_FOUND_DEVICE, 2523 &path, &cgd); 2524 xpt_release_path(&path); 2525 2526 return(1); 2527 } 2528 static int 2529 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2530 { 2531 struct cam_path path; 2532 struct ccb_pathinq cpi; 2533 struct async_node *cur_entry; 2534 2535 cur_entry = (struct async_node *)arg; 2536 2537 xpt_compile_path(&path, /*periph*/NULL, 2538 bus->sim->path_id, 2539 CAM_TARGET_WILDCARD, 2540 CAM_LUN_WILDCARD); 2541 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 2542 cpi.ccb_h.func_code = XPT_PATH_INQ; 2543 xpt_action((union ccb *)&cpi); 2544 cur_entry->callback(cur_entry->callback_arg, 2545 AC_PATH_REGISTERED, 2546 &path, &cpi); 2547 xpt_release_path(&path); 2548 2549 return(1); 2550 } 2551 2552 void 2553 xpt_action(union ccb *start_ccb) 2554 { 2555 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2556 2557 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2558 2559 switch (start_ccb->ccb_h.func_code) { 2560 case XPT_SCSI_IO: 2561 { 2562 #ifdef CAMDEBUG 2563 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2564 struct cam_path *path; 2565 2566 path = start_ccb->ccb_h.path; 2567 #endif 2568 2569 /* 2570 * For the sake of compatibility with SCSI-1 2571 * devices that may not understand the identify 2572 * message, we include lun information in the 2573 * second byte of all commands. SCSI-1 specifies 2574 * that luns are a 3 bit value and reserves only 3 2575 * bits for lun information in the CDB. Later 2576 * revisions of the SCSI spec allow for more than 8 2577 * luns, but have deprecated lun information in the 2578 * CDB. So, if the lun won't fit, we must omit it. 2579 * 2580 * Also be aware that during initial probing for devices, 2581 * the inquiry information is unknown but initialized to 0. 2582 * This means that this code will be exercised while probing 2583 * devices with an ANSI revision greater than 2. 2584 */ 2585 if (SID_ANSI_REV(&start_ccb->ccb_h.path->device->inq_data) <= 2 2586 && start_ccb->ccb_h.target_lun < 8 2587 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2588 2589 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2590 start_ccb->ccb_h.target_lun << 5; 2591 } 2592 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2593 start_ccb->csio.sense_resid = 0; 2594 start_ccb->csio.resid = 0; 2595 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s.
CDB: %s\n", 2596 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 2597 &path->device->inq_data), 2598 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 2599 cdb_str, sizeof(cdb_str)))); 2600 /* FALLTHROUGH */ 2601 } 2602 case XPT_TARGET_IO: 2603 case XPT_CONT_TARGET_IO: 2604 case XPT_ENG_EXEC: 2605 { 2606 struct cam_path *path; 2607 int s; 2608 int runq; 2609 2610 path = start_ccb->ccb_h.path; 2611 s = splsoftcam(); 2612 2613 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2614 if (path->device->qfrozen_cnt == 0) 2615 runq = xpt_schedule_dev_sendq(path->bus, path->device); 2616 else 2617 runq = 0; 2618 splx(s); 2619 if (runq != 0) 2620 xpt_run_dev_sendq(path->bus); 2621 break; 2622 } 2623 case XPT_SET_TRAN_SETTINGS: 2624 { 2625 xpt_set_transfer_settings(&start_ccb->cts, 2626 start_ccb->ccb_h.path->device, 2627 /*async_update*/FALSE); 2628 break; 2629 } 2630 case XPT_CALC_GEOMETRY: 2631 /* Filter out garbage */ 2632 if (start_ccb->ccg.block_size == 0 2633 || start_ccb->ccg.volume_size == 0) { 2634 start_ccb->ccg.cylinders = 0; 2635 start_ccb->ccg.heads = 0; 2636 start_ccb->ccg.secs_per_track = 0; 2637 start_ccb->ccb_h.status = CAM_REQ_CMP; 2638 break; 2639 } 2640 #ifdef PC98 2641 /* 2642 * In a PC-98 system, geometry translation depends on 2643 * the "real" device geometry obtained from mode page 4. 2644 * SCSI geometry translation is performed in the 2645 * initialization routine of the SCSI BIOS and the result is 2646 * stored in host memory. If the translation is available 2647 * in host memory, use it. If not, rely on the default 2648 * translation the device driver performs. 2649 */ 2650 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2651 start_ccb->ccb_h.status = CAM_REQ_CMP; 2652 break; 2653 } 2654 /* FALLTHROUGH */ 2655 #endif 2656 case XPT_ACCEPT_TARGET_IO: 2657 case XPT_EN_LUN: 2658 case XPT_IMMED_NOTIFY: 2659 case XPT_NOTIFY_ACK: 2660 case XPT_GET_TRAN_SETTINGS: 2661 case XPT_PATH_INQ: 2662 case XPT_RESET_BUS: 2663 { 2664 struct cam_sim *sim; 2665 2666 sim = start_ccb->ccb_h.path->bus->sim; 2667 (*(sim->sim_action))(sim, start_ccb); 2668 break; 2669 } 2670 case XPT_GDEV_TYPE: 2671 if ((start_ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) != 0) { 2672 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2673 } else { 2674 struct ccb_getdev *cgd; 2675 struct cam_et *tar; 2676 struct cam_ed *dev; 2677 int s; 2678 2679 s = splsoftcam(); 2680 cgd = &start_ccb->cgd; 2681 tar = cgd->ccb_h.path->target; 2682 dev = cgd->ccb_h.path->device; 2683 cgd->inq_data = dev->inq_data; 2684 cgd->pd_type = SID_TYPE(&dev->inq_data); 2685 cgd->dev_openings = dev->ccbq.dev_openings; 2686 cgd->dev_active = dev->ccbq.dev_active; 2687 cgd->devq_openings = dev->ccbq.devq_openings; 2688 cgd->devq_queued = dev->ccbq.queue.entries; 2689 cgd->held = dev->ccbq.held; 2690 cgd->maxtags = dev->quirk->maxtags; 2691 cgd->mintags = dev->quirk->mintags; 2692 cgd->ccb_h.status = CAM_REQ_CMP; 2693 cgd->serial_num_len = dev->serial_num_len; 2694 if ((dev->serial_num_len > 0) 2695 && (dev->serial_num != NULL)) 2696 bcopy(dev->serial_num, cgd->serial_num, 2697 dev->serial_num_len); 2698 splx(s); 2699 } 2700 break; 2701 case XPT_GDEVLIST: 2702 { 2703 struct cam_periph *nperiph; 2704 struct periph_list *periph_head; 2705 struct ccb_getdevlist *cgdl; 2706 int i; 2707 int s; 2708 struct cam_ed *device; 2709 int found; 2710 2711 2712 found = 0; 2713 2714 /* 2715 * Don't want anyone mucking with our data.
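* (splsoftcam() blocks the CAM software interrupt handlers, keeping the peripheral list and its generation count stable while we walk them.)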
2716 */ 2717 s = splsoftcam(); 2718 device = start_ccb->ccb_h.path->device; 2719 periph_head = &device->periphs; 2720 cgdl = &start_ccb->cgdl; 2721 2722 /* 2723 * Check and see if the list has changed since the user 2724 * last requested a list member. If so, tell them that the 2725 * list has changed, and therefore they need to start over 2726 * from the beginning. 2727 */ 2728 if ((cgdl->index != 0) && 2729 (cgdl->generation != device->generation)) { 2730 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2731 splx(s); 2732 break; 2733 } 2734 2735 /* 2736 * Traverse the list of peripherals and attempt to find 2737 * the requested peripheral. 2738 */ 2739 for (nperiph = periph_head->slh_first, i = 0; 2740 (nperiph != NULL) && (i <= cgdl->index); 2741 nperiph = nperiph->periph_links.sle_next, i++) { 2742 if (i == cgdl->index) { 2743 strncpy(cgdl->periph_name, 2744 nperiph->periph_name, 2745 DEV_IDLEN); 2746 cgdl->unit_number = nperiph->unit_number; 2747 found = 1; 2748 } 2749 } 2750 if (found == 0) { 2751 cgdl->status = CAM_GDEVLIST_ERROR; 2752 splx(s); 2753 break; 2754 } 2755 2756 if (nperiph == NULL) 2757 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2758 else 2759 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2760 2761 cgdl->index++; 2762 cgdl->generation = device->generation; 2763 2764 splx(s); 2765 cgdl->ccb_h.status = CAM_REQ_CMP; 2766 break; 2767 } 2768 case XPT_DEV_MATCH: 2769 { 2770 int s; 2771 dev_pos_type position_type; 2772 struct ccb_dev_match *cdm; 2773 int ret; 2774 2775 cdm = &start_ccb->cdm; 2776 2777 /* 2778 * Prevent EDT changes while we traverse it. 2779 */ 2780 s = splsoftcam(); 2781 /* 2782 * There are two ways of getting at information in the EDT. 2783 * The first way is via the primary EDT tree. It starts 2784 * with a list of busses, then a list of targets on a bus, 2785 * then devices/luns on a target, and then peripherals on a 2786 * device/lun. The "other" way is by the peripheral driver 2787 * lists. The peripheral driver lists are organized by 2788 * peripheral driver (obviously). So it makes sense to 2789 * use the peripheral driver list if the user is looking 2790 * for something like "da1", or all "da" devices. If the 2791 * user is looking for something on a particular bus/target 2792 * or lun, it's generally better to go through the EDT tree. 2793 */ 2794 2795 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2796 position_type = cdm->pos.position_type; 2797 else { 2798 int i; 2799 2800 position_type = CAM_DEV_POS_NONE; 2801 2802 for (i = 0; i < cdm->num_patterns; i++) { 2803 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2804 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2805 position_type = CAM_DEV_POS_EDT; 2806 break; 2807 } 2808 } 2809 2810 if (cdm->num_patterns == 0) 2811 position_type = CAM_DEV_POS_EDT; 2812 else if (position_type == CAM_DEV_POS_NONE) 2813 position_type = CAM_DEV_POS_PDRV; 2814 } 2815 2816 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2817 case CAM_DEV_POS_EDT: 2818 ret = xptedtmatch(cdm); 2819 break; 2820 case CAM_DEV_POS_PDRV: 2821 ret = xptperiphlistmatch(cdm); 2822 break; 2823 default: 2824 cdm->status = CAM_DEV_MATCH_ERROR; 2825 break; 2826 } 2827 2828 splx(s); 2829 2830 if (cdm->status == CAM_DEV_MATCH_ERROR) 2831 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2832 else 2833 start_ccb->ccb_h.status = CAM_REQ_CMP; 2834 2835 break; 2836 } 2837 case XPT_SASYNC_CB: 2838 { 2839 /* 2840 * First off, determine the list we want to 2841 * be inserted into.
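* (Handlers registered on a fully-specified path hang off the device; handlers registered on a wildcard path hang off the bus, as the test below shows.)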
2842 */ 2843 struct ccb_setasync *csa; 2844 struct async_node *cur_entry; 2845 struct async_list *async_head; 2846 u_int32_t added; 2847 int s; 2848 2849 csa = &start_ccb->csa; 2850 added = csa->event_enable; 2851 if (csa->ccb_h.path->device != NULL) { 2852 async_head = &csa->ccb_h.path->device->asyncs; 2853 } else { 2854 async_head = &csa->ccb_h.path->bus->asyncs; 2855 } 2856 2857 /* 2858 * If there is already an entry for us, simply 2859 * update it. 2860 */ 2861 s = splsoftcam(); 2862 cur_entry = SLIST_FIRST(async_head); 2863 while (cur_entry != NULL) { 2864 if ((cur_entry->callback_arg == csa->callback_arg) 2865 && (cur_entry->callback == csa->callback)) 2866 break; 2867 cur_entry = SLIST_NEXT(cur_entry, links); 2868 } 2869 2870 if (cur_entry != NULL) { 2871 /* 2872 * If the request has no flags set, 2873 * remove the entry. 2874 */ 2875 added &= ~cur_entry->event_enable; 2876 if (csa->event_enable == 0) { 2877 SLIST_REMOVE(async_head, cur_entry, 2878 async_node, links); 2879 free(cur_entry, M_DEVBUF); 2880 } else { 2881 cur_entry->event_enable = csa->event_enable; 2882 } 2883 } else { 2884 cur_entry = malloc(sizeof(*cur_entry), M_DEVBUF, 2885 M_NOWAIT); 2886 if (cur_entry == NULL) { 2887 splx(s); 2888 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2889 break; 2890 } 2891 cur_entry->callback_arg = csa->callback_arg; 2892 cur_entry->callback = csa->callback; 2893 cur_entry->event_enable = csa->event_enable; 2894 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2895 } 2896 2897 if ((added & AC_FOUND_DEVICE) != 0) { 2898 /* 2899 * Get this peripheral up to date with all 2900 * the currently existing devices. 2901 */ 2902 xpt_for_all_devices(xptsetasyncfunc, cur_entry); 2903 } 2904 if ((added & AC_PATH_REGISTERED) != 0) { 2905 /* 2906 * Get this peripheral up to date with all 2907 * the currently existing busses. 2908 */ 2909 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry); 2910 } 2911 splx(s); 2912 start_ccb->ccb_h.status = CAM_REQ_CMP; 2913 break; 2914 } 2915 case XPT_REL_SIMQ: 2916 { 2917 struct ccb_relsim *crs; 2918 struct cam_ed *dev; 2919 int s; 2920 2921 crs = &start_ccb->crs; 2922 dev = crs->ccb_h.path->device; 2923 if (dev == NULL) { 2924 2925 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2926 break; 2927 } 2928 2929 s = splcam(); 2930 2931 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2932 2933 if ((dev->inq_data.flags & SID_CmdQue) != 0) { 2934 2935 /* Don't ever go below one opening */ 2936 if (crs->openings > 0) { 2937 xpt_dev_ccbq_resize(crs->ccb_h.path, 2938 crs->openings); 2939 2940 if (bootverbose) { 2941 xpt_print_path(crs->ccb_h.path); 2942 printf("tagged openings " 2943 "now %d\n", 2944 crs->openings); 2945 } 2946 } 2947 } 2948 } 2949 2950 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2951 2952 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2953 2954 /* 2955 * Just extend the old timeout and decrement 2956 * the freeze count so that a single timeout 2957 * is sufficient for releasing the queue.
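* (Clearing CAM_DEV_QFREEZE below causes the code at the end of this case to call xpt_release_devq() once, dropping the freeze reference taken for the previous timeout; the rescheduled timeout then releases the single reference that remains.)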
2958 */ 2959 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2960 untimeout(xpt_release_devq_timeout, 2961 dev, dev->c_handle); 2962 } else { 2963 2964 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2965 } 2966 2967 dev->c_handle = 2968 timeout(xpt_release_devq_timeout, 2969 dev, 2970 (crs->release_timeout * hz) / 1000); 2971 2972 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2973 2974 } 2975 2976 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2977 2978 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2979 /* 2980 * Decrement the freeze count so that a single 2981 * completion is still sufficient to unfreeze 2982 * the queue. 2983 */ 2984 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2985 } else { 2986 2987 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2988 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2989 } 2990 } 2991 2992 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2993 2994 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2995 || (dev->ccbq.dev_active == 0)) { 2996 2997 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2998 } else { 2999 3000 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3001 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3002 } 3003 } 3004 splx(s); 3005 3006 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 3007 3008 xpt_release_devq(crs->ccb_h.path->device, 3009 /*run_queue*/TRUE); 3010 } 3011 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt; 3012 start_ccb->ccb_h.status = CAM_REQ_CMP; 3013 break; 3014 } 3015 case XPT_SCAN_BUS: 3016 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb); 3017 break; 3018 case XPT_SCAN_LUN: 3019 xpt_scan_lun(start_ccb->ccb_h.path->periph, 3020 start_ccb->ccb_h.path, start_ccb->crcn.flags, 3021 start_ccb); 3022 break; 3023 case XPT_DEBUG: { 3024 #ifdef CAMDEBUG 3025 int s; 3026 3027 s = splcam(); 3028 cam_dflags = start_ccb->cdbg.flags; 3029 if (cam_dpath != NULL) { 3030 xpt_free_path(cam_dpath); 3031 cam_dpath = NULL; 3032 } 3033 3034 if (cam_dflags != CAM_DEBUG_NONE) { 3035 if (xpt_create_path(&cam_dpath, xpt_periph, 3036 start_ccb->ccb_h.path_id, 3037 start_ccb->ccb_h.target_id, 3038 start_ccb->ccb_h.target_lun) != 3039 CAM_REQ_CMP) { 3040 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3041 cam_dflags = CAM_DEBUG_NONE; 3042 } else 3043 start_ccb->ccb_h.status = CAM_REQ_CMP; 3044 } else { 3045 cam_dpath = NULL; 3046 start_ccb->ccb_h.status = CAM_REQ_CMP; 3047 } 3048 splx(s); 3049 #else /* !CAMDEBUG */ 3050 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3051 #endif /* CAMDEBUG */ 3052 break; 3053 } 3054 case XPT_NOOP: 3055 start_ccb->ccb_h.status = CAM_REQ_CMP; 3056 break; 3057 default: 3058 case XPT_SDEV_TYPE: 3059 case XPT_ABORT: 3060 case XPT_RESET_DEV: 3061 case XPT_TERM_IO: 3062 case XPT_ENG_INQ: 3063 /* XXX Implement */ 3064 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3065 break; 3066 } 3067 } 3068 3069 void 3070 xpt_polled_action(union ccb *start_ccb) 3071 { 3072 int s; 3073 u_int32_t timeout; 3074 struct cam_sim *sim; 3075 struct cam_devq *devq; 3076 struct cam_ed *dev; 3077 3078 timeout = start_ccb->ccb_h.timeout; 3079 sim = start_ccb->ccb_h.path->bus->sim; 3080 devq = sim->devq; 3081 dev = start_ccb->ccb_h.path->device; 3082 3083 s = splcam(); 3084 3085 /* 3086 * Steal an opening so that no other queued requests 3087 * can get it before us while we simulate interrupts. 
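* (While polling we invoke the SIM's poll routine and the camnet/cambio software interrupt handlers by hand, in 1ms DELAY() steps, so completions are still reaped when real interrupt services are unavailable, e.g. while taking a crash dump.)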
3088 */ 3089 dev->ccbq.devq_openings--; 3090 dev->ccbq.dev_openings--; 3091 3092 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) 3093 && (--timeout > 0)) { 3094 DELAY(1000); 3095 (*(sim->sim_poll))(sim); 3096 swi_camnet(); 3097 swi_cambio(); 3098 } 3099 3100 dev->ccbq.devq_openings++; 3101 dev->ccbq.dev_openings++; 3102 3103 if (timeout != 0) { 3104 xpt_action(start_ccb); 3105 while(--timeout > 0) { 3106 (*(sim->sim_poll))(sim); 3107 swi_camnet(); 3108 swi_cambio(); 3109 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3110 != CAM_REQ_INPROG) 3111 break; 3112 DELAY(1000); 3113 } 3114 if (timeout == 0) { 3115 /* 3116 * XXX Is it worth adding a sim_timeout entry 3117 * point so we can attempt recovery? If 3118 * this is only used for dumps, I don't think 3119 * it is. 3120 */ 3121 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3122 } 3123 } else { 3124 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3125 } 3126 splx(s); 3127 } 3128 3129 /* 3130 * Schedule a peripheral driver to receive a ccb when its 3131 * target device has space for more transactions. 3132 */ 3133 void 3134 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) 3135 { 3136 struct cam_ed *device; 3137 int s; 3138 int runq; 3139 3140 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3141 device = perph->path->device; 3142 s = splsoftcam(); 3143 if (periph_is_queued(perph)) { 3144 /* Simply reorder based on new priority */ 3145 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3146 (" change priority to %d\n", new_priority)); 3147 if (new_priority < perph->pinfo.priority) { 3148 camq_change_priority(&device->drvq, 3149 perph->pinfo.index, 3150 new_priority); 3151 } 3152 runq = 0; 3153 } else { 3154 /* New entry on the queue */ 3155 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3156 (" added periph to queue\n")); 3157 if (device->drvq.generation++ == 0) { 3158 /* Generation wrap, regen all entries */ 3159 camq_regen(&device->drvq); 3160 } 3161 perph->pinfo.priority = new_priority; 3162 perph->pinfo.generation = device->drvq.generation; 3163 camq_insert(&device->drvq, &perph->pinfo); 3164 runq = xpt_schedule_dev_allocq(perph->path->bus, device); 3165 } 3166 splx(s); 3167 if (runq != 0) { 3168 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3169 (" calling xpt_run_devq\n")); 3170 xpt_run_dev_allocq(perph->path->bus); 3171 } 3172 } 3173 3174 3175 /* 3176 * Schedule a device to run on a given queue. 3177 * If the device was inserted as a new entry on the queue, 3178 * return 1 meaning the device queue should be run. If we 3179 * were already queued, implying someone else has already 3180 * started the queue, return 0 so the caller doesn't attempt 3181 * to run the queue. Must be run at splsoftcam or higher 3182 * (splcam encompasses splsoftcam). 3183 */ 3184 static int 3185 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3186 u_int32_t new_priority) 3187 { 3188 int retval; 3189 u_int32_t old_priority; 3190 3191 CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_schedule_dev\n")); 3192 3193 old_priority = pinfo->priority; 3194 3195 /* 3196 * Are we already queued?
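* (pinfo->index holds CAM_UNQUEUED_INDEX when the entry is not on a queue; otherwise it is the entry's current slot, which lets camq_change_priority() resift it in place.)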
3197 */ 3198 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3199 /* Simply reorder based on new priority */ 3200 if (new_priority < old_priority) { 3201 camq_change_priority(queue, pinfo->index, 3202 new_priority); 3203 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3204 ("changed priority to %d\n", 3205 new_priority)); 3206 } 3207 retval = 0; 3208 } else { 3209 /* New entry on the queue */ 3210 if (new_priority < old_priority) 3211 pinfo->priority = new_priority; 3212 3213 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3214 ("Inserting onto queue\n")); 3215 if (queue->generation++ == 0) { 3216 /* Generation wrap, regen all entries */ 3217 camq_regen(queue); 3218 } 3219 pinfo->generation = queue->generation; 3220 camq_insert(queue, pinfo); 3221 retval = 1; 3222 } 3223 return (retval); 3224 } 3225 3226 static void 3227 xpt_run_dev_allocq(struct cam_eb *bus) 3228 { 3229 struct cam_devq *devq; 3230 int s; 3231 3232 CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_allocq\n")); 3233 devq = bus->sim->devq; 3234 3235 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3236 (" qfrozen_cnt == 0x%x, entries == %d, " 3237 "openings == %d, active == %d\n", 3238 devq->alloc_queue.qfrozen_cnt, 3239 devq->alloc_queue.entries, 3240 devq->alloc_openings, 3241 devq->alloc_active)); 3242 3243 s = splsoftcam(); 3244 devq->alloc_queue.qfrozen_cnt++; 3245 while ((devq->alloc_queue.entries > 0) 3246 && (devq->alloc_openings > 0) 3247 && (devq->alloc_queue.qfrozen_cnt <= 1)) { 3248 struct cam_ed_qinfo *qinfo; 3249 struct cam_ed *device; 3250 union ccb *work_ccb; 3251 struct cam_periph *drv; 3252 struct camq *drvq; 3253 3254 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3255 /*position*/0); 3256 device = qinfo->device; 3257 3258 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3259 ("running device %p\n", device)); 3260 3261 drvq = &device->drvq; 3262 3263 #ifdef CAMDEBUG 3264 if (drvq->entries <= 0) { 3265 panic("xpt_run_dev_allocq: " 3266 "Device on queue without any work to do"); 3267 } 3268 #endif 3269 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3270 devq->alloc_openings--; 3271 devq->alloc_active++; 3272 drv = (struct cam_periph*)camq_remove(drvq, 3273 /*pos*/0); 3274 /* Update priority */ 3275 if (drvq->entries > 0) { 3276 qinfo->pinfo.priority = drvq->queue_array[0]->priority; 3277 } else { 3278 qinfo->pinfo.priority = CAM_PRIORITY_NONE; 3279 } 3280 splx(s); 3281 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3282 drv->pinfo.priority); 3283 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3284 ("calling periph start\n")); 3285 drv->periph_start(drv, work_ccb); 3286 } else { 3287 /* 3288 * Malloc failure in alloc_ccb 3289 */ 3290 /* 3291 * XXX add us to a list to be run from free_ccb 3292 * if we don't have any ccbs active on this 3293 * device queue otherwise we may never get run 3294 * again. 3295 */ 3296 break; 3297 } 3298 3299 /* Raise IPL for possible insertion and test at top of loop */ 3300 s = splsoftcam(); 3301 3302 if (drvq->entries > 0) { 3303 /* We have more work. 
Attempt to reschedule */ 3304 xpt_schedule_dev_allocq(bus, device); 3305 } 3306 } 3307 devq->alloc_queue.qfrozen_cnt--; 3308 splx(s); 3309 } 3310 3311 static void 3312 xpt_run_dev_sendq(struct cam_eb *bus) 3313 { 3314 struct cam_devq *devq; 3315 int s; 3316 3317 CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_run_dev_sendq\n")); 3318 3319 devq = bus->sim->devq; 3320 3321 s = splcam(); 3322 devq->send_queue.qfrozen_cnt++; 3323 splx(s); 3324 s = splsoftcam(); 3325 while ((devq->send_queue.entries > 0) 3326 && (devq->send_openings > 0)) { 3327 struct cam_ed_qinfo *qinfo; 3328 struct cam_ed *device; 3329 union ccb *work_ccb; 3330 struct cam_sim *sim; 3331 int ospl; 3332 3333 ospl = splcam(); 3334 if (devq->send_queue.qfrozen_cnt > 1) { 3335 splx(ospl); 3336 break; 3337 } 3338 3339 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3340 /*position*/0); 3341 device = qinfo->device; 3342 3343 /* 3344 * If the device has been "frozen", don't attempt 3345 * to run it. 3346 */ 3347 if (device->qfrozen_cnt > 0) { 3348 splx(ospl); 3349 continue; 3350 } 3351 3352 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 3353 ("running device %p\n", device)); 3354 3355 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, 0); 3356 if (work_ccb == NULL) { 3357 printf("device on run queue with no ccbs???"); 3358 splx(ospl); 3359 continue; 3360 } 3361 3362 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3363 3364 if (num_highpower <= 0) { 3365 /* 3366 * We got a high power command, but we 3367 * don't have any available slots. Freeze 3368 * the device queue until we have a slot 3369 * available. 3370 */ 3371 device->qfrozen_cnt++; 3372 STAILQ_INSERT_TAIL(&highpowerq, 3373 &work_ccb->ccb_h, 3374 xpt_links.stqe); 3375 3376 splx(ospl); 3377 continue; 3378 } else { 3379 /* 3380 * Consume a high power slot while 3381 * this ccb runs. 3382 */ 3383 num_highpower--; 3384 } 3385 } 3386 devq->active_dev = device; 3387 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3388 3389 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3390 splx(ospl); 3391 3392 devq->send_openings--; 3393 devq->send_active++; 3394 3395 if (device->ccbq.queue.entries > 0) { 3396 qinfo->pinfo.priority = 3397 device->ccbq.queue.queue_array[0]->priority; 3398 xpt_schedule_dev_sendq(bus, device); 3399 } else { 3400 qinfo->pinfo.priority = CAM_PRIORITY_NONE; 3401 } 3402 3403 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3404 /* 3405 * The client wants to freeze the queue 3406 * after this CCB is sent. 3407 */ 3408 ospl = splcam(); 3409 device->qfrozen_cnt++; 3410 splx(ospl); 3411 } 3412 3413 splx(s); 3414 3415 if ((device->inq_flags & SID_CmdQue) != 0) 3416 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3417 else 3418 /* 3419 * Clear this in case of a retried CCB that failed 3420 * due to a rejected tag. 3421 */ 3422 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3423 3424 /* 3425 * Device queues can be shared among multiple sim instances 3426 * that reside on different busses. Use the SIM in the queue 3427 * CCB's path, rather than the one in the bus that was passed 3428 * into this function. 
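* (With a shared devq, a single pass over the send queue may dispatch ccbs whose paths point at sibling busses of a multi-channel controller, so the bus argument is only the starting point for the run.)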
3429 */ 3430 sim = work_ccb->ccb_h.path->bus->sim; 3431 (*(sim->sim_action))(sim, work_ccb); 3432 3433 ospl = splcam(); 3434 devq->active_dev = NULL; 3435 splx(ospl); 3436 /* Raise IPL for possible insertion and test at top of loop */ 3437 s = splsoftcam(); 3438 } 3439 splx(s); 3440 s = splcam(); 3441 devq->send_queue.qfrozen_cnt--; 3442 splx(s); 3443 } 3444 3445 /* 3446 * This function merges fields from the slave ccb into the master ccb, while 3447 * keeping important fields in the master ccb constant. 3448 */ 3449 void 3450 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3451 { 3452 /* 3453 * Pull fields that are valid for peripheral drivers to set 3454 * into the master CCB along with the CCB "payload". 3455 */ 3456 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3457 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3458 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3459 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3460 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3461 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3462 } 3463 3464 void 3465 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3466 { 3467 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3468 ccb_h->pinfo.priority = priority; 3469 ccb_h->path = path; 3470 ccb_h->path_id = path->bus->path_id; 3471 if (path->target) 3472 ccb_h->target_id = path->target->target_id; 3473 else 3474 ccb_h->target_id = CAM_TARGET_WILDCARD; 3475 if (path->device) { 3476 if (path->device->ccbq.queue.generation++ == 0) { 3477 /* Generation wrap, regen all entries */ 3478 cam_ccbq_regen(&path->device->ccbq); 3479 } 3480 ccb_h->target_lun = path->device->lun_id; 3481 ccb_h->pinfo.generation = path->device->ccbq.queue.generation; 3482 } else { 3483 ccb_h->target_lun = CAM_LUN_WILDCARD; 3484 } 3485 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3486 ccb_h->flags = 0; 3487 } 3488 3489 /* Path manipulation functions */ 3490 cam_status 3491 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3492 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3493 { 3494 struct cam_path *path; 3495 cam_status status; 3496 3497 path = (struct cam_path *)malloc(sizeof(*path), M_DEVBUF, M_NOWAIT); 3498 3499 if (path == NULL) { 3500 status = CAM_RESRC_UNAVAIL; 3501 return(status); 3502 } 3503 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3504 if (status != CAM_REQ_CMP) { 3505 free(path, M_DEVBUF); 3506 path = NULL; 3507 } 3508 *new_path_ptr = path; 3509 return (status); 3510 } 3511 3512 static cam_status 3513 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3514 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3515 { 3516 struct cam_eb *bus; 3517 struct cam_et *target; 3518 struct cam_ed *device; 3519 cam_status status; 3520 int s; 3521 3522 status = CAM_REQ_CMP; /* Completed without error */ 3523 target = NULL; /* Wildcarded */ 3524 device = NULL; /* Wildcarded */ 3525 s = splsoftcam(); 3526 bus = xpt_find_bus(path_id); 3527 if (bus == NULL) { 3528 status = CAM_PATH_INVALID; 3529 } else if (target_id != CAM_TARGET_WILDCARD) { 3530 target = xpt_find_target(bus, target_id); 3531 if (target == NULL) { 3532 if (path_id == CAM_XPT_PATH_ID) { 3533 status = CAM_TID_INVALID; 3534 } else { 3535 /* Create one */ 3536 struct cam_et *new_target; 3537 3538 new_target = xpt_alloc_target(bus, target_id); 3539 if (new_target == NULL) { 3540 status = CAM_RESRC_UNAVAIL; 3541 } else { 3542 target = new_target; 3543 } 3544
} 3545 } 3546 if (target != NULL && lun_id != CAM_LUN_WILDCARD) { 3547 device = xpt_find_device(target, lun_id); 3548 if (device == NULL) { 3549 if (path_id == CAM_XPT_PATH_ID) { 3550 status = CAM_LUN_INVALID; 3551 } else { 3552 /* Create one */ 3553 struct cam_ed *new_device; 3554 3555 new_device = xpt_alloc_device(bus, 3556 target, 3557 lun_id); 3558 if (new_device == NULL) { 3559 status = CAM_RESRC_UNAVAIL; 3560 } else { 3561 device = new_device; 3562 } 3563 } 3564 } 3565 } 3566 } else if (lun_id != CAM_LUN_WILDCARD) { 3567 /* 3568 * Specific luns are not allowed if the 3569 * target is wildcarded 3570 */ 3571 status = CAM_LUN_INVALID; 3572 } 3573 3574 /* 3575 * Only touch the user's data if we are successful. 3576 */ 3577 if (status == CAM_REQ_CMP) { 3578 new_path->periph = perph; 3579 new_path->bus = bus; 3580 new_path->target = target; 3581 new_path->device = device; 3582 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3583 } else { 3584 if (device != NULL) 3585 xpt_release_device(bus, target, device); 3586 if (target != NULL) 3587 xpt_release_target(bus, target); 3588 } 3589 splx(s); 3590 return (status); 3591 } 3592 3593 static void 3594 xpt_release_path(struct cam_path *path) 3595 { 3596 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3597 if (path->device != NULL) 3598 xpt_release_device(path->bus, path->target, path->device); 3599 if (path->target != NULL) 3600 xpt_release_target(path->bus, path->target); 3601 } 3602 3603 void 3604 xpt_free_path(struct cam_path *path) 3605 { 3606 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3607 xpt_release_path(path); 3608 free(path, M_DEVBUF); 3609 } 3610 3611 3612 /* 3613 * Return -1 for failure, 0 for exact match, 1 for match with wildcards. 3614 */ 3615 int 3616 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3617 { 3618 int retval = 0; 3619 3620 if (path1->bus != path2->bus) { 3621 if ((path1->bus == NULL) 3622 || (path2->bus == NULL)) 3623 retval = 1; 3624 else 3625 return (-1); 3626 } 3627 if (path1->target != path2->target) { 3628 if ((path1->target == NULL) 3629 || (path2->target == NULL)) 3630 retval = 1; 3631 else 3632 return (-1); 3633 } 3634 if (path1->device != path2->device) { 3635 if ((path1->device == NULL) 3636 || (path2->device == NULL)) 3637 retval = 1; 3638 else 3639 return (-1); 3640 } 3641 return (retval); 3642 } 3643 3644 void 3645 xpt_print_path(struct cam_path *path) 3646 { 3647 if (path == NULL) 3648 printf("(nopath): "); 3649 else { 3650 if (path->periph != NULL) 3651 printf("(%s%d:", path->periph->periph_name, 3652 path->periph->unit_number); 3653 else 3654 printf("(noperiph:"); 3655 3656 if (path->bus != NULL) 3657 printf("%s%d:%d:", path->bus->sim->sim_name, 3658 path->bus->sim->unit_number, 3659 path->bus->sim->bus_id); 3660 else 3661 printf("nobus:"); 3662 3663 if (path->target != NULL) 3664 printf("%d:", path->target->target_id); 3665 else 3666 printf("X:"); 3667 3668 if (path->device != NULL) 3669 printf("%d): ", path->device->lun_id); 3670 else 3671 printf("X): "); 3672 } 3673 } 3674 3675 path_id_t 3676 xpt_path_path_id(struct cam_path *path) 3677 { 3678 return(path->bus->path_id); 3679 } 3680 3681 target_id_t 3682 xpt_path_target_id(struct cam_path *path) 3683 { 3684 if (path->target != NULL) 3685 return (path->target->target_id); 3686 else 3687 return (CAM_TARGET_WILDCARD); 3688 } 3689 3690 lun_id_t 3691 xpt_path_lun_id(struct cam_path *path) 3692 { 3693 if (path->device != NULL) 3694 return (path->device->lun_id); 3695 else 3696 return (CAM_LUN_WILDCARD); 
3697 } 3698 3699 struct cam_sim * 3700 xpt_path_sim(struct cam_path *path) 3701 { 3702 return (path->bus->sim); 3703 } 3704 3705 struct cam_periph* 3706 xpt_path_periph(struct cam_path *path) 3707 { 3708 return (path->periph); 3709 } 3710 3711 /* 3712 * Release a CAM control block for the caller. Remit the cost of the structure 3713 * to the device referenced by the path. If this device had no 'credits' 3714 * and peripheral drivers have registered async callbacks for this notification, 3715 * call them now. 3716 */ 3717 void 3718 xpt_release_ccb(union ccb *free_ccb) 3719 { 3720 int s; 3721 struct cam_path *path; 3722 struct cam_ed *device; 3723 struct cam_eb *bus; 3724 3725 CAM_DEBUG_PRINT(CAM_DEBUG_TRACE, ("xpt_release_ccb\n")); 3726 path = free_ccb->ccb_h.path; 3727 device = path->device; 3728 bus = path->bus; 3729 s = splsoftcam(); 3730 cam_ccbq_release_opening(&device->ccbq); 3731 if (xpt_ccb_count > xpt_max_ccbs) { 3732 xpt_free_ccb(free_ccb); 3733 xpt_ccb_count--; 3734 } else { 3735 SLIST_INSERT_HEAD(&ccb_freeq, &free_ccb->ccb_h, xpt_links.sle); 3736 } 3737 bus->sim->devq->alloc_openings++; 3738 bus->sim->devq->alloc_active--; 3739 /* XXX Turn this into an inline function - xpt_run_device?? */ 3740 if ((device_is_alloc_queued(device) == 0) 3741 && (device->drvq.entries > 0)) { 3742 xpt_schedule_dev_allocq(bus, device); 3743 } 3744 splx(s); 3745 if (dev_allocq_is_runnable(bus->sim->devq)) 3746 xpt_run_dev_allocq(bus); 3747 } 3748 3749 /* Functions accessed by SIM drivers */ 3750 3751 /* 3752 * A sim structure, listing the SIM entry points and instance 3753 * identification info, is passed to xpt_bus_register to hook the SIM 3754 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3755 * for this new bus and places it in the array of busses and assigns 3756 * it a path_id. The path_id may be influenced by "hard wiring" 3757 * information specified by the user. Once interrupt services are 3758 * available, the bus will be probed.
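 *
 * A minimal sketch of the registration sequence as a SIM driver might
 * perform it (hypothetical fragment; action_func, poll_func, softc and
 * the "hypo" name are stand-ins, argument lists are elided, and error
 * handling is omitted):
 *
 *	sim = cam_sim_alloc(action_func, poll_func, "hypo", softc, ...);
 *	if (xpt_bus_register(sim, 0) != CAM_SUCCESS)
 *		cam_sim_free(...);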
3759 */ 3760 int32_t 3761 xpt_bus_register(struct cam_sim *sim, u_int32_t bus) 3762 { 3763 static path_id_t buscount; 3764 struct cam_eb *new_bus; 3765 struct ccb_pathinq cpi; 3766 int s; 3767 3768 sim->bus_id = bus; 3769 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3770 M_DEVBUF, M_NOWAIT); 3771 if (new_bus == NULL) { 3772 /* Couldn't satisfy request */ 3773 return (CAM_RESRC_UNAVAIL); 3774 } 3775 3776 bzero(new_bus, sizeof(*new_bus)); 3777 3778 if (strcmp(sim->sim_name, "xpt") != 0) { 3779 3780 sim->path_id = xptpathid(sim->sim_name, sim->unit_number, 3781 sim->bus_id, &buscount); 3782 } 3783 3784 new_bus->path_id = sim->path_id; 3785 new_bus->sim = sim; 3786 SLIST_INIT(&new_bus->asyncs); 3787 TAILQ_INIT(&new_bus->et_entries); 3788 s = splsoftcam(); 3789 TAILQ_INSERT_TAIL(&xpt_busses, new_bus, links); 3790 bus_generation++; 3791 3792 /* Notify interested parties */ 3793 if (sim->path_id != CAM_XPT_PATH_ID) { 3794 struct cam_path path; 3795 3796 xpt_compile_path(&path, /*periph*/NULL, sim->path_id, 3797 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3798 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1); 3799 cpi.ccb_h.func_code = XPT_PATH_INQ; 3800 xpt_action((union ccb *)&cpi); 3801 xpt_async(AC_PATH_REGISTERED, xpt_periph->path, &cpi); 3802 xpt_release_path(&path); 3803 } 3804 splx(s); 3805 return (CAM_SUCCESS); 3806 } 3807 3808 static int 3809 xptnextfreebus(path_id_t startbus) 3810 { 3811 struct cam_sim_config *sim_conf; 3812 3813 sim_conf = cam_sinit; 3814 while (sim_conf->sim_name != NULL) { 3815 3816 if (IS_SPECIFIED(sim_conf->pathid) 3817 && (startbus == sim_conf->pathid)) { 3818 ++startbus; 3819 /* Start the search over */ 3820 sim_conf = cam_sinit; 3821 } else { 3822 sim_conf++; 3823 } 3824 } 3825 return (startbus); 3826 } 3827 3828 static int 3829 xptpathid(const char *sim_name, int sim_unit, 3830 int sim_bus, path_id_t *nextpath) 3831 { 3832 struct cam_sim_config *sim_conf; 3833 path_id_t pathid; 3834 3835 pathid = CAM_XPT_PATH_ID; 3836 for (sim_conf = cam_sinit; sim_conf->sim_name != NULL; sim_conf++) { 3837 3838 if (!IS_SPECIFIED(sim_conf->pathid)) 3839 continue; 3840 3841 if (!strcmp(sim_name, sim_conf->sim_name) 3842 && (sim_unit == sim_conf->sim_unit)) { 3843 3844 if (IS_SPECIFIED(sim_conf->sim_bus)) { 3845 if (sim_bus == sim_conf->sim_bus) { 3846 pathid = sim_conf->pathid; 3847 break; 3848 } 3849 } else if (sim_bus == 0) { 3850 /* Unspecified matches bus 0 */ 3851 pathid = sim_conf->pathid; 3852 break; 3853 } else { 3854 printf("Ambiguous scbus configuration for %s%d " 3855 "bus %d, cannot wire down. The kernel " 3856 "config entry for scbus%d should " 3857 "specify a controller bus.\n" 3858 "Scbus will be assigned dynamically.\n", 3859 sim_name, sim_unit, sim_bus, 3860 sim_conf->pathid); 3861 break; 3862 } 3863 } 3864 } 3865 3866 if (pathid == CAM_XPT_PATH_ID) { 3867 pathid = xptnextfreebus(*nextpath); 3868 *nextpath = pathid + 1; 3869 } 3870 return (pathid); 3871 } 3872 3873 int32_t 3874 xpt_bus_deregister(path_id) 3875 u_int8_t path_id; 3876 { 3877 /* XXX */ 3878 return (CAM_SUCCESS); 3879 } 3880 3881 void 3882 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 3883 { 3884 struct cam_eb *bus; 3885 struct cam_et *target, *next_target; 3886 struct cam_ed *device, *next_device; 3887 int s; 3888 3889 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 3890 3891 s = splsoftcam(); 3892 3893 bus = path->bus; 3894 3895 /* 3896 * Freeze the SIM queue for SCSI_DELAY ms to 3897 * allow the bus to settle. 
3898 */ 3899 if (async_code == AC_BUS_RESET) { 3900 struct cam_sim *sim; 3901 3902 sim = bus->sim; 3903 3904 /* 3905 * If there isn't already another timeout pending, go ahead 3906 * and freeze the simq and set the timeout flag. If there 3907 * is another timeout pending, replace it with this 3908 * timeout. There could be two bus reset async broadcasts 3909 * sent for some dual-channel controllers. 3910 */ 3911 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) == 0) { 3912 xpt_freeze_simq(sim, 1); 3913 sim->flags |= CAM_SIM_REL_TIMEOUT_PENDING; 3914 } else 3915 untimeout(xpt_release_simq_timeout, sim, sim->c_handle); 3916 3917 sim->c_handle = timeout(xpt_release_simq_timeout, 3918 sim, (SCSI_DELAY * hz) / 1000); 3919 } 3920 3921 for (target = TAILQ_FIRST(&bus->et_entries); 3922 target != NULL; 3923 target = next_target) { 3924 3925 next_target = TAILQ_NEXT(target, links); 3926 3927 if (path->target != target 3928 && path->target != NULL) 3929 continue; 3930 3931 for (device = TAILQ_FIRST(&target->ed_entries); 3932 device != NULL; 3933 device = next_device) { 3934 cam_status status; 3935 struct cam_path newpath; 3936 3937 next_device = TAILQ_NEXT(device, links); 3938 3939 if (path->device != device 3940 && path->device != NULL) 3941 continue; 3942 3943 /* 3944 * We need our own path with wildcards expanded to 3945 * handle certain types of events. 3946 */ 3947 if ((async_code == AC_SENT_BDR) 3948 || (async_code == AC_BUS_RESET) 3949 || (async_code == AC_INQ_CHANGED)) 3950 status = xpt_compile_path(&newpath, NULL, 3951 bus->path_id, 3952 target->target_id, 3953 device->lun_id); 3954 else 3955 status = CAM_REQ_CMP_ERR; 3956 3957 if (status == CAM_REQ_CMP) { 3958 3959 /* 3960 * Allow transfer negotiation to occur in a 3961 * tag free environment. 3962 */ 3963 if (async_code == AC_SENT_BDR 3964 || async_code == AC_BUS_RESET) 3965 xpt_toggle_tags(&newpath); 3966 3967 /* 3968 * If we send a BDR, freeze the device queue 3969 * for SCSI_DELAY ms to allow it to settle 3970 * down. 3971 */ 3972 if (async_code == AC_SENT_BDR) { 3973 xpt_freeze_devq(&newpath, 1); 3974 /* 3975 * Although this looks bad, it 3976 * isn't as bad as it seems. We're 3977 * passing in a stack-allocated path 3978 * that we then immediately release 3979 * after scheduling a timeout to 3980 * release the device queue. So 3981 * the path won't be around when 3982 * the timeout fires, right? Right. 3983 * But it doesn't matter, since 3984 * xpt_release_devq and its timeout 3985 * function both take the device as 3986 * an argument. Theoretically, the 3987 * device will still be there when 3988 * the timeout fires, even though 3989 * the path will be gone. 3990 */ 3991 cam_release_devq( 3992 &newpath, 3993 /*relsim_flags*/ 3994 RELSIM_RELEASE_AFTER_TIMEOUT, 3995 /*reduction*/0, 3996 /*timeout*/SCSI_DELAY, 3997 /*getcount_only*/0); 3998 } else if (async_code == AC_INQ_CHANGED) { 3999 /* 4000 * We've sent a start unit command, or 4001 * something similar, to a device that 4002 * may have caused its inquiry data to 4003 * change. So we re-scan the device to 4004 * refresh the inquiry data for it.
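* (The CAM_EXPECT_INQ_CHANGE flag passed to xpt_scan_lun() below warns the probe code that different inquiry data is anticipated, so the cached copy can be refreshed for the existing device instead of the change being treated as an error.)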
static void
xpt_async_bcast(struct async_list *async_head,
		u_int32_t async_code,
		struct cam_path *path, void *async_arg)
{
	struct async_node *cur_entry;

	cur_entry = SLIST_FIRST(async_head);
	while (cur_entry != NULL) {
		struct async_node *next_entry;
		/*
		 * Grab the next list entry before we call the current
		 * entry's callback.  This is because the callback function
		 * can delete its async callback entry.
		 */
		next_entry = SLIST_NEXT(cur_entry, links);
		if ((cur_entry->event_enable & async_code) != 0)
			cur_entry->callback(cur_entry->callback_arg,
					    async_code, path,
					    async_arg);
		cur_entry = next_entry;
	}
}

u_int32_t
xpt_freeze_devq(struct cam_path *path, u_int count)
{
	int s;
	struct ccb_hdr *ccbh;

	s = splcam();
	path->device->qfrozen_cnt += count;

	/*
	 * Mark the last CCB in the queue as needing
	 * to be requeued if the driver hasn't
	 * changed its state yet.  This fixes a race
	 * where a ccb is just about to be queued to
	 * a controller driver when its interrupt routine
	 * freezes the queue.  To completely close the
	 * hole, controller drivers must check to see
	 * if a ccb's status is still CAM_REQ_INPROG
	 * under spl protection just before they queue
	 * the CCB.  See ahc_action/ahc_freeze_devq for
	 * an example.
	 */
	ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_list);
	if (ccbh && ccbh->status == CAM_REQ_INPROG)
		ccbh->status = CAM_REQUEUE_REQ;
	splx(s);
	return (path->device->qfrozen_cnt);
}

u_int32_t
xpt_freeze_simq(struct cam_sim *sim, u_int count)
{
	sim->devq->send_queue.qfrozen_cnt += count;
	if (sim->devq->active_dev != NULL) {
		struct ccb_hdr *ccbh;

		ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
				  ccb_hdr_list);
		if (ccbh && ccbh->status == CAM_REQ_INPROG)
			ccbh->status = CAM_REQUEUE_REQ;
	}
	return (sim->devq->send_queue.qfrozen_cnt);
}
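/*
 * Usage note (added commentary, not original text): freezes are
 * counted, so every xpt_freeze_devq()/xpt_freeze_simq() must be
 * balanced by a matching release before I/O resumes.  A SIM error
 * path typically looks something like:
 *
 *	xpt_freeze_devq(ccb->ccb_h.path, 1);
 *	ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_DEV_QFRZN;
 *	xpt_done(ccb);
 *
 * with recovery code later thawing the queue via
 * xpt_release_devq(..., TRUE) or an XPT_REL_SIMQ ccb.
 */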
static void
xpt_release_devq_timeout(void *arg)
{
	struct cam_ed *device;

	device = (struct cam_ed *)arg;

	xpt_release_devq(device, /*run_queue*/TRUE);
}

void
xpt_release_devq(struct cam_ed *dev, int run_queue)
{
	int rundevq;
	int s;

	rundevq = 0;
	s = splcam();
	if (dev->qfrozen_cnt > 0) {

		dev->qfrozen_cnt--;
		if (dev->qfrozen_cnt == 0) {

			/*
			 * No longer need to wait for a successful
			 * command completion.
			 */
			dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;

			/*
			 * Remove any timeouts that might be scheduled
			 * to release this queue.
			 */
			if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
				untimeout(xpt_release_devq_timeout, dev,
					  dev->c_handle);
				dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
			}

			/*
			 * Now that we are unfrozen, schedule the
			 * device so any pending transactions are
			 * run.
			 */
			if ((dev->ccbq.queue.entries > 0)
			 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
			 && (run_queue != 0)) {
				rundevq = 1;
			}
		}
	}
	splx(s);
	if (rundevq != 0)
		xpt_run_dev_sendq(dev->target->bus);
}

void
xpt_release_simq(struct cam_sim *sim, int run_queue)
{
	int s;
	struct camq *sendq;

	sendq = &(sim->devq->send_queue);
	s = splcam();
	if (sendq->qfrozen_cnt > 0) {

		sendq->qfrozen_cnt--;
		if (sendq->qfrozen_cnt == 0) {

			/*
			 * If there is a timeout scheduled to release this
			 * sim queue, remove it.  The queue frozen count is
			 * already at 0.
			 */
			if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0) {
				untimeout(xpt_release_simq_timeout, sim,
					  sim->c_handle);
				sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
			}

			splx(s);

			if (run_queue) {
				/*
				 * Now that we are unfrozen run the send queue.
				 */
				xpt_run_dev_sendq(xpt_find_bus(sim->path_id));
			}
		} else
			splx(s);
	} else
		splx(s);
}

static void
xpt_release_simq_timeout(void *arg)
{
	struct cam_sim *sim;

	sim = (struct cam_sim *)arg;
	xpt_release_simq(sim, /* run_queue */ TRUE);
}

void
xpt_done(union ccb *done_ccb)
{
	int s;

	s = splcam();

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
	switch (done_ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	case XPT_ENG_EXEC:
	case XPT_TARGET_IO:
	case XPT_ACCEPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
	case XPT_SCAN_BUS:
	case XPT_SCAN_LUN:
	{
		/*
		 * Queue up the request for handling by our SWI handler
		 * for any of the "non-immediate" types of ccbs.
		 */
		switch (done_ccb->ccb_h.path->periph->type) {
		case CAM_PERIPH_BIO:
			TAILQ_INSERT_TAIL(&cam_bioq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcambio();
			break;
		case CAM_PERIPH_NET:
			TAILQ_INSERT_TAIL(&cam_netq, &done_ccb->ccb_h,
					  sim_links.tqe);
			done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
			setsoftcamnet();
			break;
		}
		break;
	}
	default:
		break;
	}
	splx(s);
}
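/*
 * Usage sketch (illustrative): a SIM's interrupt handler finishes a
 * transaction by filling in the status and handing the ccb back to
 * the transport, which defers the peripheral callback to the SWI:
 *
 *	ccb->csio.scsi_status = SCSI_STATUS_OK;
 *	ccb->ccb_h.status = CAM_REQ_CMP;
 *	xpt_done(ccb);
 *
 * The peripheral's cbfcnp then runs from camisr(), below, rather
 * than from the controller's interrupt context.
 */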
union ccb *
xpt_alloc_ccb(void)
{
	union ccb *new_ccb;

	new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_WAITOK);
	return (new_ccb);
}

void
xpt_free_ccb(union ccb *free_ccb)
{
	free(free_ccb, M_DEVBUF);
}



/* Private XPT functions */

/*
 * Get a CAM control block for the caller.  Charge the structure to the device
 * referenced by the path.  If this device has no 'credits' then the device
 * already has the maximum number of outstanding operations under way and we
 * return NULL.  If we don't have sufficient resources to allocate more ccbs,
 * we also return NULL.
 */
static union ccb *
xpt_get_ccb(struct cam_ed *device)
{
	union ccb *new_ccb;
	int s;

	s = splsoftcam();
	if ((new_ccb = (union ccb *)ccb_freeq.slh_first) == NULL) {
		new_ccb = malloc(sizeof(*new_ccb), M_DEVBUF, M_NOWAIT);
		if (new_ccb == NULL) {
			splx(s);
			return (NULL);
		}
		callout_handle_init(&new_ccb->ccb_h.timeout_ch);
		SLIST_INSERT_HEAD(&ccb_freeq, &new_ccb->ccb_h,
				  xpt_links.sle);
		xpt_ccb_count++;
	}
	cam_ccbq_take_opening(&device->ccbq);
	SLIST_REMOVE_HEAD(&ccb_freeq, xpt_links.sle);
	splx(s);
	return (new_ccb);
}


static struct cam_et *
xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	target = (struct cam_et *)malloc(sizeof(*target), M_DEVBUF, M_NOWAIT);
	if (target != NULL) {
		struct cam_et *cur_target;

		target->bus = bus;
		target->target_id = target_id;
		target->refcount = 1;
		TAILQ_INIT(&target->ed_entries);

		/* Insertion sort into our bus's target list */
		cur_target = TAILQ_FIRST(&bus->et_entries);
		while (cur_target != NULL && cur_target->target_id < target_id)
			cur_target = TAILQ_NEXT(cur_target, links);

		if (cur_target != NULL)
			TAILQ_INSERT_BEFORE(cur_target, target, links);
		else
			TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
		/* The list contents changed in either case. */
		bus->generation++;
	}
	return (target);
}

void
xpt_release_target(struct cam_eb *bus, struct cam_et *target)
{
	if ((--target->refcount == 0)
	 && (TAILQ_FIRST(&target->ed_entries) == NULL)) {
		TAILQ_REMOVE(&bus->et_entries, target, links);
		bus->generation++;
		free(target, M_DEVBUF);
	}
}

static struct cam_ed *
xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;
	struct cam_devq *devq;
	int32_t status;
	int s;

	s = splsoftcam();
	/* Make space for us in the device queue on our bus */
	devq = bus->sim->devq;
	status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
	splx(s);

	if (status != CAM_REQ_CMP) {
		device = NULL;
	} else {
		device = (struct cam_ed *)malloc(sizeof(*device),
						 M_DEVBUF, M_NOWAIT);
	}

	if (device != NULL) {
		struct cam_ed *cur_device;

		bzero(device, sizeof(*device));

		SLIST_INIT(&device->asyncs);
		SLIST_INIT(&device->periphs);
		callout_handle_init(&device->c_handle);
		device->refcount = 1;
		device->flags |= CAM_DEV_UNCONFIGURED;

		cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
		device->alloc_ccb_entry.device = device;
		cam_init_pinfo(&device->send_ccb_entry.pinfo);
		device->send_ccb_entry.device = device;

		device->target = target;

		device->lun_id = lun_id;

		/* Initialize our queues */
		if (camq_init(&device->drvq, 0) != 0) {
			free(device, M_DEVBUF);
			return (NULL);
		}

		if (cam_ccbq_init(&device->ccbq,
				  bus->sim->max_dev_openings) != 0) {
			camq_fini(&device->drvq);
			free(device, M_DEVBUF);
			return (NULL);
		}
		s = splsoftcam();
		/*
		 * XXX should be limited by number of CCBs this bus can
		 * do.
		 */
		xpt_max_ccbs += device->ccbq.devq_openings;
		/* Insertion sort into our target's device list */
		cur_device = TAILQ_FIRST(&target->ed_entries);
		while (cur_device != NULL && cur_device->lun_id < lun_id)
			cur_device = TAILQ_NEXT(cur_device, links);
		if (cur_device != NULL)
			TAILQ_INSERT_BEFORE(cur_device, device, links);
		else
			TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
		/* The list contents changed in either case. */
		target->generation++;
		splx(s);
	}
	return (device);
}

static void
xpt_release_device(struct cam_eb *bus, struct cam_et *target,
		   struct cam_ed *device)
{
	int s;

	if ((--device->refcount == 0)
	 && ((device->flags & CAM_DEV_UNCONFIGURED) != 0)) {
		struct cam_devq *devq;

		s = splsoftcam();
		TAILQ_REMOVE(&target->ed_entries, device, links);
		target->generation++;
		xpt_max_ccbs -= device->ccbq.devq_openings;
		free(device, M_DEVBUF);
		/* Release our slot in the devq */
		devq = bus->sim->devq;
		cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
		splx(s);
	}
}

static u_int32_t
xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
{
	int s;
	int diff;
	int result;
	struct cam_ed *dev;

	dev = path->device;
	s = splsoftcam();

	diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
	result = cam_ccbq_resize(&dev->ccbq, newopenings);
	if (result == CAM_REQ_CMP && (diff < 0)) {
		dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
	}
	/* Adjust the global limit */
	xpt_max_ccbs += diff;
	splx(s);
	return (result);
}

static struct cam_eb *
xpt_find_bus(path_id_t path_id)
{
	struct cam_eb *bus;

	for (bus = TAILQ_FIRST(&xpt_busses);
	     bus != NULL;
	     bus = TAILQ_NEXT(bus, links)) {
		if (bus->path_id == path_id)
			break;
	}
	return (bus);
}

static struct cam_et *
xpt_find_target(struct cam_eb *bus, target_id_t target_id)
{
	struct cam_et *target;

	for (target = TAILQ_FIRST(&bus->et_entries);
	     target != NULL;
	     target = TAILQ_NEXT(target, links)) {
		if (target->target_id == target_id) {
			target->refcount++;
			break;
		}
	}
	return (target);
}

static struct cam_ed *
xpt_find_device(struct cam_et *target, lun_id_t lun_id)
{
	struct cam_ed *device;

	for (device = TAILQ_FIRST(&target->ed_entries);
	     device != NULL;
	     device = TAILQ_NEXT(device, links)) {
		if (device->lun_id == lun_id) {
			device->refcount++;
			break;
		}
	}
	return (device);
}
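/*
 * Reference counting note (added commentary): the xpt_find_target()
 * and xpt_find_device() lookups above take a reference on any node
 * they return, so a successful lookup must eventually be balanced,
 * e.g.:
 *
 *	target = xpt_find_target(bus, target_id);
 *	if (target != NULL) {
 *		...use target...
 *		xpt_release_target(bus, target);
 *	}
 *
 * xpt_find_bus() takes no reference in this version.
 */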
typedef struct {
	union ccb *request_ccb;
	struct ccb_pathinq *cpi;
	int pending_count;
} xpt_scan_bus_info;

/*
 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
 * As the scan progresses, xpt_scan_bus is used as the
 * completion callback function.
 */
static void
xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
{
	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_bus\n"));
	switch (request_ccb->ccb_h.func_code) {
	case XPT_SCAN_BUS:
	{
		xpt_scan_bus_info *scan_info;
		union ccb *work_ccb;
		struct cam_path *path;
		u_int i;
		u_int max_target;
		u_int initiator_id;

		/* Find out the characteristics of the bus */
		work_ccb = xpt_alloc_ccb();
		xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		work_ccb->ccb_h.func_code = XPT_PATH_INQ;
		xpt_action(work_ccb);
		if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
			request_ccb->ccb_h.status = work_ccb->ccb_h.status;
			xpt_free_ccb(work_ccb);
			xpt_done(request_ccb);
			return;
		}

		/* Save some state for use while we probe for devices */
		scan_info = (xpt_scan_bus_info *)
		    malloc(sizeof(xpt_scan_bus_info), M_TEMP, M_WAITOK);
		scan_info->request_ccb = request_ccb;
		scan_info->cpi = &work_ccb->cpi;

		/* Cache on our stack so we can work asynchronously */
		max_target = scan_info->cpi->max_target;
		initiator_id = scan_info->cpi->initiator_id;

		/*
		 * Don't count the initiator in the pending total if its
		 * ID falls within the addressable target range, since we
		 * never probe it.
		 */
		scan_info->pending_count = max_target + 1;
		if (initiator_id <= max_target)
			scan_info->pending_count--;

		for (i = 0; i <= max_target; i++) {
			cam_status status;
			if (i == initiator_id)
				continue;

			status = xpt_create_path(&path, xpt_periph,
						 request_ccb->ccb_h.path_id,
						 i, 0);
			if (status != CAM_REQ_CMP) {
				printf("xpt_scan_bus: xpt_create_path failed"
				       " with status %#x, bus scan halted\n",
				       status);
				break;
			}
			work_ccb = xpt_alloc_ccb();
			xpt_setup_ccb(&work_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			work_ccb->ccb_h.ppriv_ptr0 = scan_info;
			work_ccb->crcn.flags = request_ccb->crcn.flags;
#if 0
			printf("xpt_scan_bus: probing %d:%d:%d\n",
			       request_ccb->ccb_h.path_id, i, 0);
#endif
			xpt_action(work_ccb);
		}
		break;
	}
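	/*
	 * Worked example (added commentary): on a bus reporting
	 * max_target = 7 with initiator_id = 7, eight IDs (0-7) exist,
	 * the initiator's own ID is skipped by the loop above, and
	 * pending_count starts at 7 -- one outstanding XPT_SCAN_LUN
	 * per probed target.
	 */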
	case XPT_SCAN_LUN:
	{
		xpt_scan_bus_info *scan_info;
		path_id_t path_id;
		target_id_t target_id;
		lun_id_t lun_id;

		/* Reuse the same CCB to query if a device was really found */
		scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
		xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
			      request_ccb->ccb_h.pinfo.priority);
		request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;

		path_id = request_ccb->ccb_h.path_id;
		target_id = request_ccb->ccb_h.target_id;
		lun_id = request_ccb->ccb_h.target_lun;
		xpt_action(request_ccb);

#if 0
		printf("xpt_scan_bus: got back probe from %d:%d:%d\n",
		       path_id, target_id, lun_id);
#endif

		if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
			struct cam_ed *device;
			struct cam_et *target;

			/*
			 * If we already probed lun 0 successfully, or
			 * we have additional configured luns on this
			 * target that might have "gone away", go on to
			 * the next lun.
			 */
			target = request_ccb->ccb_h.path->target;
			device = TAILQ_FIRST(&target->ed_entries);
			if (device != NULL)
				device = TAILQ_NEXT(device, links);

			if ((lun_id != 0) || (device != NULL)) {
				/* Try the next lun */
				lun_id++;
			}
		} else {
			struct cam_ed *device;

			device = request_ccb->ccb_h.path->device;

			if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
				/* Try the next lun */
				lun_id++;
			}
		}

		xpt_free_path(request_ccb->ccb_h.path);

		/*
		 * Check bounds.  If lun_id was not advanced above, or it
		 * has passed the bus's maximum lun, this target is done.
		 */
		if ((lun_id == request_ccb->ccb_h.target_lun)
		 || lun_id > scan_info->cpi->max_lun) {
			/* We're done */

			xpt_free_ccb(request_ccb);
			scan_info->pending_count--;
			if (scan_info->pending_count == 0) {
				xpt_free_ccb((union ccb *)scan_info->cpi);
				request_ccb = scan_info->request_ccb;
				free(scan_info, M_TEMP);
				request_ccb->ccb_h.status = CAM_REQ_CMP;
				xpt_done(request_ccb);
			}
		} else {
			/* Try the next device */
			struct cam_path *path;
			cam_status status;

			path = request_ccb->ccb_h.path;
			status = xpt_create_path(&path, xpt_periph,
						 path_id, target_id, lun_id);
			if (status != CAM_REQ_CMP) {
				printf("xpt_scan_bus: xpt_create_path failed "
				       "with status %#x, halting LUN scan\n",
				       status);
				xpt_free_ccb(request_ccb);
				scan_info->pending_count--;
				if (scan_info->pending_count == 0) {
					xpt_free_ccb(
						(union ccb *)scan_info->cpi);
					request_ccb = scan_info->request_ccb;
					free(scan_info, M_TEMP);
					request_ccb->ccb_h.status = CAM_REQ_CMP;
					xpt_done(request_ccb);
					break;
				}
			}
			xpt_setup_ccb(&request_ccb->ccb_h, path,
				      request_ccb->ccb_h.pinfo.priority);
			request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
			request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
			request_ccb->ccb_h.ppriv_ptr0 = scan_info;
			request_ccb->crcn.flags =
			    scan_info->request_ccb->crcn.flags;
#if 0
			xpt_print_path(path);
			printf("xpt_scan bus probing\n");
#endif
			xpt_action(request_ccb);
		}
		break;
	}
	default:
		break;
	}
}

typedef enum {
	PROBE_TUR,
	PROBE_INQUIRY,
	PROBE_MODE_SENSE,
	PROBE_SERIAL_NUM,
	PROBE_TUR_FOR_NEGOTIATION
} probe_action;

typedef enum {
	PROBE_INQUIRY_CKSUM	= 0x01,
	PROBE_SERIAL_CKSUM	= 0x02,
	PROBE_NO_ANNOUNCE	= 0x04
} probe_flags;

typedef struct {
	TAILQ_HEAD(, ccb_hdr) request_ccbs;
	probe_action	action;
	union ccb	saved_ccb;
	probe_flags	flags;
	MD5_CTX		context;
	u_int8_t	digest[16];
} probe_softc;
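/*
 * Descriptive note (added commentary): a probe walks these states in
 * order, skipping stages that do not apply --
 *
 *	PROBE_TUR -> PROBE_INQUIRY -> PROBE_MODE_SENSE
 *		  -> PROBE_SERIAL_NUM -> PROBE_TUR_FOR_NEGOTIATION
 *
 * The initial TUR is skipped for unconfigured devices and for luns
 * other than 0, and MODE_SENSE is skipped for devices that do not
 * claim command queuing; see probeschedule() and probedone() below.
 */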
static void
xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
	     cam_flags flags, union ccb *request_ccb)
{
	cam_status status;
	struct cam_path *new_path;
	struct cam_periph *old_periph;
	int s;

	CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
		  ("xpt_scan_lun\n"));

	if (request_ccb == NULL) {
		request_ccb = malloc(sizeof(union ccb), M_TEMP, M_NOWAIT);
		if (request_ccb == NULL) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't allocate CCB, can't "
			       "continue\n");
			return;
		}
		new_path = malloc(sizeof(*new_path), M_TEMP, M_NOWAIT);
		if (new_path == NULL) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't allocate path, can't "
			       "continue\n");
			free(request_ccb, M_TEMP);
			return;
		}
		status = xpt_compile_path(new_path, xpt_periph,
					  path->bus->path_id,
					  path->target->target_id,
					  path->device->lun_id);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			printf("xpt_scan_lun: can't compile path, can't "
			       "continue\n");
			free(request_ccb, M_TEMP);
			free(new_path, M_TEMP);
			return;
		}
		xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
		request_ccb->ccb_h.cbfcnp = xptscandone;
		request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
		request_ccb->crcn.flags = flags;
	}

	s = splsoftcam();
	if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
		probe_softc *softc;

		softc = (probe_softc *)old_periph->softc;
		TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
				  periph_links.tqe);
	} else {
		status = cam_periph_alloc(proberegister, NULL, probecleanup,
					  probestart, "probe",
					  CAM_PERIPH_BIO,
					  request_ccb->ccb_h.path, NULL, 0,
					  request_ccb);

		if (status != CAM_REQ_CMP) {
			xpt_print_path(path);
			printf("xpt_scan_lun: cam_periph_alloc returned an "
			       "error, can't continue probe\n");
			request_ccb->ccb_h.status = status;
			xpt_done(request_ccb);
		}
	}
	splx(s);
}

static void
xptscandone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* Both the path and the ccb were allocated in xpt_scan_lun(). */
	xpt_release_path(done_ccb->ccb_h.path);
	free(done_ccb->ccb_h.path, M_TEMP);
	free(done_ccb, M_TEMP);
}

static cam_status
proberegister(struct cam_periph *periph, void *arg)
{
	struct ccb_getdev *cgd;
	probe_softc *softc;
	union ccb *ccb;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("proberegister: periph was NULL!!\n");
		return (CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("proberegister: no getdev CCB, can't register device\n");
		return (CAM_REQ_CMP_ERR);
	}

	softc = (probe_softc *)malloc(sizeof(*softc), M_TEMP, M_NOWAIT);

	if (softc == NULL) {
		printf("proberegister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return (CAM_REQ_CMP_ERR);
	}
	ccb = (union ccb *)cgd;
	TAILQ_INIT(&softc->request_ccbs);
	TAILQ_INSERT_TAIL(&softc->request_ccbs, &ccb->ccb_h, periph_links.tqe);
	softc->flags = 0;
	periph->softc = softc;
	cam_periph_acquire(periph);
	probeschedule(periph);
	return (CAM_REQ_CMP);
}
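/*
 * Lifecycle note (added commentary): proberegister() takes a
 * reference on the probe periph via cam_periph_acquire(); the
 * matching cam_periph_invalidate()/cam_periph_release() pair runs at
 * the bottom of probedone() once the last queued request ccb has
 * completed, tearing the probe instance back down.
 */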
static void
probeschedule(struct cam_periph *periph)
{
	union ccb *ccb;
	probe_softc *softc;

	softc = (probe_softc *)periph->softc;
	ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);

	/*
	 * If a device has gone away and another device, or the same one,
	 * is back in the same place, it should have a unit attention
	 * condition pending.  It will not report the unit attention in
	 * response to an inquiry, which may leave invalid transfer
	 * negotiations in effect.  The TUR will reveal the unit attention
	 * condition.  Only send the TUR for lun 0, since some devices
	 * will get confused by commands other than inquiry to non-existent
	 * luns.  If you think a device has gone away, start your scan from
	 * lun 0.  This will ensure that any bogus transfer settings are
	 * invalidated.
	 */
	if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
	 && (ccb->ccb_h.target_lun == 0))
		softc->action = PROBE_TUR;
	else
		softc->action = PROBE_INQUIRY;

	if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
		softc->flags |= PROBE_NO_ANNOUNCE;
	else
		softc->flags &= ~PROBE_NO_ANNOUNCE;

	xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
}

static void
probestart(struct cam_periph *periph, union ccb *start_ccb)
{
	/* Probe the device that our peripheral driver points to */
	struct ccb_scsiio *csio;
	probe_softc *softc;

	CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));

	softc = (probe_softc *)periph->softc;
	csio = &start_ccb->csio;

	switch (softc->action) {
	case PROBE_TUR:
	case PROBE_TUR_FOR_NEGOTIATION:
	{
		scsi_test_unit_ready(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     /*timeout*/60000);
		break;
	}
	case PROBE_INQUIRY:
	{
		struct scsi_inquiry_data *inq_buf;

		inq_buf = &periph->path->device->inq_data;
		/*
		 * If the device is currently configured, we calculate an
		 * MD5 checksum of the inquiry data, and if the serial number
		 * length is greater than 0, add the serial number data
		 * into the checksum as well.  Once the inquiry and the
		 * serial number check finish, we attempt to figure out
		 * whether we still have the same device.
		 */
		if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {

			MD5Init(&softc->context);
			MD5Update(&softc->context, (unsigned char *)inq_buf,
				  sizeof(struct scsi_inquiry_data));
			softc->flags |= PROBE_INQUIRY_CKSUM;
			if (periph->path->device->serial_num_len > 0) {
				MD5Update(&softc->context,
					  periph->path->device->serial_num,
					  periph->path->device->serial_num_len);
				softc->flags |= PROBE_SERIAL_CKSUM;
			}
			MD5Final(softc->digest, &softc->context);
		}

		scsi_inquiry(csio,
			     /*retries*/4,
			     probedone,
			     MSG_SIMPLE_Q_TAG,
			     (u_int8_t *)inq_buf,
			     sizeof(*inq_buf),
			     /*evpd*/FALSE,
			     /*page_code*/0,
			     SSD_MIN_SIZE,
			     /*timeout*/60 * 1000);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		void *mode_buf;
		int mode_buf_len;

		mode_buf_len = sizeof(struct scsi_mode_header_6)
			     + sizeof(struct scsi_mode_blk_desc)
			     + sizeof(struct scsi_control_page);
		mode_buf = malloc(mode_buf_len, M_TEMP, M_NOWAIT);
		if (mode_buf != NULL) {
			scsi_mode_sense(csio,
					/*retries*/4,
					probedone,
					MSG_SIMPLE_Q_TAG,
					/*dbd*/FALSE,
					SMS_PAGE_CTRL_CURRENT,
					SMS_CONTROL_MODE_PAGE,
					mode_buf,
					mode_buf_len,
					SSD_FULL_SIZE,
					/*timeout*/60000);
			break;
		}
		xpt_print_path(periph->path);
		printf("Unable to mode sense control page - malloc failure\n");
		softc->action = PROBE_SERIAL_NUM;
		/* FALLTHROUGH */
	}
	case PROBE_SERIAL_NUM:
	{
		struct scsi_vpd_unit_serial_number *serial_buf;
		struct cam_ed *device;

		serial_buf = NULL;
		device = periph->path->device;
		device->serial_num = NULL;
		device->serial_num_len = 0;

		if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0)
			serial_buf = (struct scsi_vpd_unit_serial_number *)
			    malloc(sizeof(*serial_buf), M_TEMP, M_NOWAIT);

		if (serial_buf != NULL) {
			bzero(serial_buf, sizeof(*serial_buf));
			scsi_inquiry(csio,
				     /*retries*/4,
				     probedone,
				     MSG_SIMPLE_Q_TAG,
				     (u_int8_t *)serial_buf,
				     sizeof(*serial_buf),
				     /*evpd*/TRUE,
				     SVPD_UNIT_SERIAL_NUMBER,
				     SSD_MIN_SIZE,
				     /*timeout*/60 * 1000);
			break;
		}
		/*
		 * We'll have to do without; let our probedone
		 * routine finish up for us.
		 */
		start_ccb->csio.data_ptr = NULL;
		probedone(periph, start_ccb);
		return;
	}
	}
	xpt_action(start_ccb);
}
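/*
 * Scheduling note (added commentary): probestart() is not called
 * directly.  probeschedule() and probedone() call xpt_schedule(),
 * and the transport invokes the periph's start routine with a ccb
 * once one is available for the device at the requested priority.
 */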
static void
probedone(struct cam_periph *periph, union ccb *done_ccb)
{
	probe_softc *softc;
	struct cam_path *path;
	u_int32_t priority;

	CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));

	softc = (probe_softc *)periph->softc;
	path = done_ccb->ccb_h.path;
	priority = done_ccb->ccb_h.pinfo.priority;

	switch (softc->action) {
	case PROBE_TUR:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {

			if (cam_periph_error(done_ccb, 0,
					     SF_NO_PRINT, NULL) == ERESTART)
				return;
			else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				/* Don't wedge the queue */
				xpt_release_devq(done_ccb->ccb_h.path->device,
						 /*run_queue*/TRUE);
		}
		softc->action = PROBE_INQUIRY;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_INQUIRY:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_inquiry_data *inq_buf;
			u_int8_t periph_qual;
			u_int8_t periph_dtype;

			inq_buf = &path->device->inq_data;

			periph_qual = SID_QUAL(inq_buf);
			periph_dtype = SID_TYPE(inq_buf);
			if (periph_dtype != T_NODEVICE) {
				switch (periph_qual) {
				case SID_QUAL_LU_CONNECTED:
				{
					xpt_find_quirk(path->device);

					if ((inq_buf->flags & SID_CmdQue) != 0)
						softc->action =
						    PROBE_MODE_SENSE;
					else
						softc->action =
						    PROBE_SERIAL_NUM;

					path->device->flags &=
					    ~CAM_DEV_UNCONFIGURED;

					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, priority);
					return;
				}
				default:
					break;
				}
			}
		} else if (cam_periph_error(done_ccb, 0,
					    done_ccb->ccb_h.target_lun > 0
					    ? SF_RETRY_UA|SF_QUIET_IR
					    : SF_RETRY_UA,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		/*
		 * If we get to this point, we got an error status back
		 * from the inquiry and the error status doesn't require
		 * automatically retrying the command.  Therefore, the
		 * inquiry failed.  If we had inquiry information before
		 * for this device, but this latest inquiry command failed,
		 * the device has probably gone away.  If this device isn't
		 * already marked unconfigured, notify the peripheral
		 * drivers that this device is no more.
		 */
		if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
			/* Send the async notification. */
			xpt_async(AC_LOST_DEVICE, path, NULL);

		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_MODE_SENSE:
	{
		struct ccb_scsiio *csio;
		struct scsi_mode_header_6 *mode_hdr;

		csio = &done_ccb->csio;
		mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct scsi_control_page *page;
			u_int8_t *offset;

			offset = ((u_int8_t *)&mode_hdr[1])
			       + mode_hdr->blk_desc_len;
			page = (struct scsi_control_page *)offset;
			path->device->queue_flags = page->queue_flags;
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}
		xpt_release_ccb(done_ccb);
		free(mode_hdr, M_TEMP);
		softc->action = PROBE_SERIAL_NUM;
		xpt_schedule(periph, priority);
		return;
	}
	case PROBE_SERIAL_NUM:
	{
		struct ccb_scsiio *csio;
		struct scsi_vpd_unit_serial_number *serial_buf;
		u_int32_t priority;
		int changed;
		int have_serialnum;

		changed = 1;
		have_serialnum = 0;
		csio = &done_ccb->csio;
		priority = done_ccb->ccb_h.pinfo.priority;
		serial_buf =
		    (struct scsi_vpd_unit_serial_number *)csio->data_ptr;

		/* Clean up from previous instance of this device */
		if (path->device->serial_num != NULL) {
			free(path->device->serial_num, M_DEVBUF);
			path->device->serial_num = NULL;
			path->device->serial_num_len = 0;
		}

		if (serial_buf == NULL) {
			/*
			 * Don't process the command as it was never sent
			 */
		} else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
			&& (serial_buf->length > 0)) {

			have_serialnum = 1;
			path->device->serial_num =
			    (u_int8_t *)malloc((serial_buf->length + 1),
					       M_DEVBUF, M_NOWAIT);
			if (path->device->serial_num != NULL) {
				bcopy(serial_buf->serial_num,
				      path->device->serial_num,
				      serial_buf->length);
				path->device->serial_num_len =
				    serial_buf->length;
				path->device->serial_num[serial_buf->length]
				    = '\0';
			}
		} else if (cam_periph_error(done_ccb, 0,
					    SF_RETRY_UA|SF_NO_PRINT,
					    &softc->saved_ccb) == ERESTART) {
			return;
		} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}

		/*
		 * Let's see if we have seen this device before.
		 */
		if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
			MD5_CTX context;
			u_int8_t digest[16];

			MD5Init(&context);

			MD5Update(&context,
				  (unsigned char *)&path->device->inq_data,
				  sizeof(struct scsi_inquiry_data));

			if (have_serialnum)
				MD5Update(&context, serial_buf->serial_num,
					  serial_buf->length);

			MD5Final(digest, &context);
			if (bcmp(softc->digest, digest, 16) == 0)
				changed = 0;

			/*
			 * XXX Do we need to do a TUR in order to ensure
			 * that the device really hasn't changed???
			 */
			if ((changed != 0)
			 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
				xpt_async(AC_LOST_DEVICE, path, NULL);
		}
		if (serial_buf != NULL)
			free(serial_buf, M_TEMP);

		if (changed != 0) {
			/*
			 * Now that we have all the information necessary
			 * to safely perform transfer negotiations, set the
			 * device up.  Controllers don't perform any
			 * negotiation or tagged queuing until after the
			 * first XPT_SET_TRAN_SETTINGS ccb is received, so
			 * on a new device we just retrieve the user
			 * settings and install them as the current
			 * settings.
			 */
			done_ccb->ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
			done_ccb->cts.flags = CCB_TRANS_USER_SETTINGS;
			xpt_action(done_ccb);
			done_ccb->ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
			done_ccb->cts.flags &= ~CCB_TRANS_USER_SETTINGS;
			done_ccb->cts.flags |= CCB_TRANS_CURRENT_SETTINGS;
			xpt_action(done_ccb);
			xpt_release_ccb(done_ccb);

			/*
			 * Perform a TUR to allow the controller to
			 * perform any necessary transfer negotiation.
			 */
			softc->action = PROBE_TUR_FOR_NEGOTIATION;
			xpt_schedule(periph, priority);
			return;
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	case PROBE_TUR_FOR_NEGOTIATION:
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
			/* Don't wedge the queue */
			xpt_release_devq(done_ccb->ccb_h.path->device,
					 /*run_queue*/TRUE);
		}

		path->device->flags &= ~CAM_DEV_UNCONFIGURED;

		if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
			/* Inform the XPT that a new device has been found */
			done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action(done_ccb);

			xpt_async(AC_FOUND_DEVICE, xpt_periph->path, done_ccb);
		}
		xpt_release_ccb(done_ccb);
		break;
	}
	done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
	TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
	done_ccb->ccb_h.status = CAM_REQ_CMP;
	xpt_done(done_ccb);
	if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
		cam_periph_invalidate(periph);
		cam_periph_release(periph);
	} else {
		probeschedule(periph);
	}
}

static void
probecleanup(struct cam_periph *periph)
{
	free(periph->softc, M_TEMP);
}

static void
xpt_find_quirk(struct cam_ed *device)
{
	caddr_t match;

	match = cam_quirkmatch((caddr_t)&device->inq_data,
			       (caddr_t)xpt_quirk_table,
			       sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
			       sizeof(*xpt_quirk_table), scsi_inquiry_match);

	if (match == NULL)
		panic("xpt_find_quirk: device didn't match wildcard entry!!");

	device->quirk = (struct xpt_quirk_entry *)match;
}
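/*
 * Illustrative sketch (assumed shape, mirroring the xpt_quirk_table
 * entries defined earlier in this file): a quirk entry pairs an
 * inquiry pattern with quirk flags and tag limits, e.g.
 *
 *	{
 *		{ T_DIRECT, SIP_MEDIA_FIXED, "VENDOR", "PRODUCT*", "*" },
 *		CAM_QUIRK_NOLUNS, 0, 0
 *	},
 *
 * cam_quirkmatch() falls through to a catch-all wildcard entry,
 * which is why a NULL match above is a panic rather than an error
 * return.
 */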
static void
xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
			  int async_update)
{
	struct cam_sim *sim;
	int qfrozen;

	sim = cts->ccb_h.path->bus->sim;
	if (async_update == FALSE) {
		struct scsi_inquiry_data *inq_data;
		struct ccb_pathinq cpi;

		if (device == NULL) {
			cts->ccb_h.status = CAM_PATH_INVALID;
			xpt_done((union ccb *)cts);
			return;
		}

		/*
		 * Perform sanity checking against what the
		 * controller and device can do.
		 */
		xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
		cpi.ccb_h.func_code = XPT_PATH_INQ;
		xpt_action((union ccb *)&cpi);

		inq_data = &device->inq_data;
		if ((inq_data->flags & SID_Sync) == 0
		 || (cpi.hba_inquiry & PI_SDTR_ABLE) == 0) {
			/* Force async */
			cts->sync_period = 0;
			cts->sync_offset = 0;
		}

		switch (cts->bus_width) {
		case MSG_EXT_WDTR_BUS_32_BIT:
			if ((inq_data->flags & SID_WBus32) != 0
			 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
				break;
			/* Fall Through to 16-bit */
		case MSG_EXT_WDTR_BUS_16_BIT:
			if ((inq_data->flags & SID_WBus16) != 0
			 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
				break;
			}
			/* Fall Through to 8-bit */
		default: /* New bus width?? */
		case MSG_EXT_WDTR_BUS_8_BIT:
			/* All targets can do this */
			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
			break;
		}

		if ((cts->flags & CCB_TRANS_DISC_ENB) == 0) {
			/*
			 * Can't tag queue without disconnection.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
			cts->valid |= CCB_TRANS_TQ_VALID;
		}

		if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
		 || (inq_data->flags & SID_CmdQue) == 0
		 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
		 || (device->quirk->mintags == 0)) {
			/*
			 * Can't tag on hardware that doesn't support tags,
			 * doesn't have tagging enabled, or has broken tag
			 * support.
			 */
			cts->flags &= ~CCB_TRANS_TAG_ENB;
		}
	}

	qfrozen = FALSE;
	if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
		int device_tagenb;

		/*
		 * If we are transitioning from tags to no-tags or
		 * vice-versa, we need to carefully freeze and restart
		 * the queue so that we don't overlap tagged and non-tagged
		 * commands.  We also temporarily stop tags if there is
		 * a change in transfer negotiation settings to allow
		 * "tag-less" negotiation.
		 */
		if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
		 || (device->inq_flags & SID_CmdQue) != 0)
			device_tagenb = TRUE;
		else
			device_tagenb = FALSE;

		if (((cts->flags & CCB_TRANS_TAG_ENB) != 0
		  && device_tagenb == FALSE)
		 || ((cts->flags & CCB_TRANS_TAG_ENB) == 0
		  && device_tagenb == TRUE)) {

			if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
				/*
				 * Delay the change to tagged operation until
				 * after a few commands have gone to this
				 * device so the controller has time to
				 * perform transfer negotiations without
				 * tagged messages getting in the way.
				 */
				device->tag_delay_count = CAM_TAG_DELAY_COUNT;
				device->flags |= CAM_DEV_TAG_AFTER_COUNT;
			} else {
				xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
				qfrozen = TRUE;
				device->inq_flags &= ~SID_CmdQue;
				xpt_dev_ccbq_resize(cts->ccb_h.path,
						    sim->max_dev_openings);
				device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
				device->tag_delay_count = 0;
			}
		} else if ((cts->flags & (CCB_TRANS_SYNC_RATE_VALID|
					  CCB_TRANS_SYNC_OFFSET_VALID|
					  CCB_TRANS_BUS_WIDTH_VALID)) != 0) {
			xpt_toggle_tags(cts->ccb_h.path);
		}
	}

	if (async_update == FALSE)
		(*(sim->sim_action))(sim, (union ccb *)cts);

	if (qfrozen) {
		struct ccb_relsim crs;

		xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
			      /*priority*/1);
		crs.ccb_h.func_code = XPT_REL_SIMQ;
		crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
		crs.openings
		    = crs.release_timeout
		    = crs.qfrozen_cnt
		    = 0;
		xpt_action((union ccb *)&crs);
	}
}

static void
xpt_toggle_tags(struct cam_path *path)
{
	/*
	 * Give controllers a chance to renegotiate
	 * before starting tag operations.  We
	 * "toggle" tagged queuing off then on
	 * which causes the tag enable command delay
	 * counter to come into effect.
	 */
	if ((path->device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
	 || (path->device->inq_flags & SID_CmdQue) != 0) {
		struct ccb_trans_settings cts;

		xpt_setup_ccb(&cts.ccb_h, path, 1);
		cts.flags = 0;
		cts.valid = CCB_TRANS_TQ_VALID;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
		cts.flags = CCB_TRANS_TAG_ENB;
		xpt_set_transfer_settings(&cts, path->device,
					  /*async_update*/TRUE);
	}
}

static void
xpt_start_tags(struct cam_path *path)
{
	struct ccb_relsim crs;
	struct cam_ed *device;
	struct cam_sim *sim;
	int newopenings;

	device = path->device;
	sim = path->bus->sim;
	device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
	xpt_freeze_devq(path, /*count*/1);
	device->inq_flags |= SID_CmdQue;
	newopenings = min(device->quirk->maxtags, sim->max_tagged_dev_openings);
	xpt_dev_ccbq_resize(path, newopenings);
	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
	crs.openings
	    = crs.release_timeout
	    = crs.qfrozen_cnt
	    = 0;
	xpt_action((union ccb *)&crs);
}

static int busses_to_config;

static int
xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
{
	if (bus->path_id != CAM_XPT_PATH_ID)
		busses_to_config++;

	return (1);
}
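/*
 * Traversal note (added commentary): xpt_for_all_busses() callbacks
 * return nonzero to continue the walk and zero to stop it, which is
 * how xptconfigfunc() below halts configuration when path creation
 * fails.
 */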
static int
xptconfigfunc(struct cam_eb *bus, void *arg)
{
	struct cam_path *path;
	union ccb *work_ccb;

	if (bus->path_id != CAM_XPT_PATH_ID) {
		cam_status status;

		work_ccb = xpt_alloc_ccb();
		if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
					      CAM_TARGET_WILDCARD,
					      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
			printf("xptconfigfunc: xpt_create_path failed with "
			       "status %#x for bus %d\n", status, bus->path_id);
			printf("xptconfigfunc: halting bus configuration\n");
			xpt_free_ccb(work_ccb);
			return (0);
		}
		xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
		work_ccb->ccb_h.func_code = XPT_RESET_BUS;
		work_ccb->ccb_h.cbfcnp = NULL;
		CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
			  ("Resetting Bus\n"));
		xpt_action(work_ccb);
		xpt_finishconfig(xpt_periph, work_ccb);
	}

	return (1);
}

static void
xpt_config(void *arg)
{
	/* Now that interrupts are enabled, go find our devices */

#ifdef CAMDEBUG
	/* Setup debugging flags and path */
#ifdef CAM_DEBUG_FLAGS
	cam_dflags = CAM_DEBUG_FLAGS;
#else /* !CAM_DEBUG_FLAGS */
	cam_dflags = CAM_DEBUG_NONE;
#endif /* CAM_DEBUG_FLAGS */
#ifdef CAM_DEBUG_BUS
	if (cam_dflags != CAM_DEBUG_NONE) {
		if (xpt_create_path(&cam_dpath, xpt_periph,
				    CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
				    CAM_DEBUG_LUN) != CAM_REQ_CMP) {
			printf("xpt_config: xpt_create_path() failed for debug"
			       " target %d:%d:%d, debugging disabled\n",
			       CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
			cam_dflags = CAM_DEBUG_NONE;
		}
	} else
		cam_dpath = NULL;
#else /* !CAM_DEBUG_BUS */
	cam_dpath = NULL;
#endif /* CAM_DEBUG_BUS */
#endif /* CAMDEBUG */

	/* Scan all installed busses */
	xpt_for_all_busses(xptconfigbuscountfunc, NULL);

	if (busses_to_config == 0) {
		/* Call manually because we don't have any busses */
		xpt_finishconfig(xpt_periph, NULL);
	} else {
		if (SCSI_DELAY >= 2000) {
			printf("Waiting %d seconds for SCSI "
			       "devices to settle\n", SCSI_DELAY/1000);
		}
		xpt_for_all_busses(xptconfigfunc, NULL);
	}
}

static int
xptfinishconfigfunc(struct cam_ed *device, void *arg)
{
	union ccb work_ccb;
	struct cam_path path;
	cam_status status;

	if ((status = xpt_compile_path(&path, xpt_periph,
				       device->target->bus->path_id,
				       device->target->target_id,
				       device->lun_id)) != CAM_REQ_CMP) {
		printf("xptfinishconfig: xpt_compile_path failed with status"
		       " %#x, halting device registration\n", status);
		return (0);
	}

	xpt_setup_ccb(&work_ccb.ccb_h, &path, /*priority*/1);

	work_ccb.ccb_h.func_code = XPT_GDEV_TYPE;
	xpt_action(&work_ccb);
	xpt_async(AC_FOUND_DEVICE, &path, &work_ccb);

	xpt_release_path(&path);
	return (1);
}

/*
 * If the given device only has one peripheral attached to it, and if that
 * peripheral is the passthrough driver, announce it.  This ensures that the
 * user sees some sort of announcement for every peripheral in their system.
 */
static int
xptpassannouncefunc(struct cam_ed *device, void *arg)
{
	struct cam_periph *periph;
	int i;

	/* Count the peripherals attached to this device. */
	for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
	     periph = SLIST_NEXT(periph, periph_links), i++)
		;

	periph = SLIST_FIRST(&device->periphs);
	if ((i == 1)
	 && (strncmp(periph->periph_name, "pass", 4) == 0))
		xpt_announce_periph(periph, NULL);

	return (1);
}

static void
xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
{
	struct periph_driver **p_drv;
	int i;

	if (done_ccb != NULL) {
		CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
			  ("xpt_finishconfig\n"));
		switch (done_ccb->ccb_h.func_code) {
		case XPT_RESET_BUS:
			if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
				done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
				done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
				xpt_action(done_ccb);
				return;
			}
			/* FALLTHROUGH */
		case XPT_SCAN_BUS:
			xpt_free_path(done_ccb->ccb_h.path);
			busses_to_config--;
			break;
		default:
			break;
		}
	}

	if (busses_to_config == 0) {
		/* Register all the peripheral drivers */
		/* XXX This will have to change when we have LKMs */
		p_drv = (struct periph_driver **)periphdriver_set.ls_items;
		for (i = 0; p_drv[i] != NULL; i++) {
			(*p_drv[i]->init)();
		}

		/*
		 * Iterate through our devices, announcing
		 * them in probed bus order.
		 */
		xpt_for_all_devices(xptfinishconfigfunc, NULL);

		/*
		 * Check for devices with no "standard" peripheral driver
		 * attached.  For any devices like that, announce the
		 * passthrough driver so the user will see something.
		 */
		xpt_for_all_devices(xptpassannouncefunc, NULL);

		/* Release our hook so that the boot can continue. */
		config_intrhook_disestablish(xpt_config_hook);
	}
	if (done_ccb != NULL)
		xpt_free_ccb(done_ccb);
}

static void
xptaction(struct cam_sim *sim, union ccb *work_ccb)
{
	CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));

	switch (work_ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi;

		cpi = &work_ccb->cpi;
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->bus_id = sim->bus_id;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(work_ccb);
		break;
	}
	default:
		work_ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(work_ccb);
		break;
	}
}
/*
 * Should only be called by the machine interrupt dispatch routines,
 * so put these prototypes here instead of in the header.
 *
 * XXX we should really have a way to dynamically register SWI handlers.
 */

void
swi_camnet(void)
{
	camisr(&cam_netq);
}

void
swi_cambio(void)
{
	camisr(&cam_bioq);
}

static void
camisr(cam_isrq_t *queue)
{
	int s;
	struct ccb_hdr *ccb_h;

	s = splcam();
	while ((ccb_h = TAILQ_FIRST(queue)) != NULL) {
		int runq;

		TAILQ_REMOVE(queue, ccb_h, sim_links.tqe);
		ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
		splx(s);

		CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
			  ("camisr"));

		runq = FALSE;

		if (ccb_h->flags & CAM_HIGH_POWER) {
			struct highpowerlist *hphead;
			struct cam_ed *device;
			union ccb *send_ccb;

			hphead = &highpowerq;

			send_ccb = (union ccb *)STAILQ_FIRST(hphead);

			/*
			 * Increment the count since this command is done.
			 */
			num_highpower++;

			/*
			 * Any high powered commands queued up?
			 */
			if (send_ccb != NULL) {
				device = send_ccb->ccb_h.path->device;

				STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);

				xpt_release_devq(device, TRUE);
			}
		}
		if ((ccb_h->func_code != XPT_ACCEPT_TARGET_IO)
		 && (ccb_h->func_code != XPT_SCAN_LUN)
		 && (ccb_h->func_code != XPT_SCAN_BUS)) {
			struct cam_ed *dev;

			dev = ccb_h->path->device;

			s = splcam();
			cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);

			ccb_h->path->bus->sim->devq->send_active--;
			ccb_h->path->bus->sim->devq->send_openings++;
			splx(s);

			if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
			 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
			  && (dev->ccbq.dev_active == 0))) {

				xpt_release_devq(ccb_h->path->device,
						 /*run_queue*/TRUE);
			}

			if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
			 && (--dev->tag_delay_count == 0))
				xpt_start_tags(ccb_h->path);

			if ((dev->ccbq.queue.entries > 0)
			 && (dev->qfrozen_cnt == 0)
			 && (device_is_send_queued(dev) == 0)) {
				runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
							      dev);
			}
		}

		if (ccb_h->status & CAM_RELEASE_SIMQ) {
			xpt_release_simq(ccb_h->path->bus->sim,
					 /*run_queue*/TRUE);
		} else if ((ccb_h->flags & CAM_DEV_QFRZDIS)
			&& (ccb_h->status & CAM_DEV_QFRZN)) {
			xpt_release_devq(ccb_h->path->device,
					 /*run_queue*/TRUE);
			ccb_h->status &= ~CAM_DEV_QFRZN;
		} else if (runq) {
			xpt_run_dev_sendq(ccb_h->path->bus);
		}

		/* Call the peripheral driver's callback */
		(*ccb_h->cbfcnp)(ccb_h->path->periph,
				 (union ccb *)ccb_h);

		/* Raise IPL again for the loop-condition test */
		s = splcam();
	}
	splx(s);
}