/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>    /* geometry translation */
#include <machine/stdarg.h>    /* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
    struct task    task;
    void          *data1;
    uintptr_t      data2;
};

typedef enum {
    XPT_FLAG_OPEN = 0x01
} xpt_flags;

struct xpt_softc {
    xpt_flags       flags;
    u_int32_t       xpt_generation;

    /* number of high powered commands that can go through right now */
    STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
    int             num_highpower;

    /* queue for handling async rescan requests. */
    TAILQ_HEAD(, ccb_hdr) ccb_scanq;
    int buses_to_config;
    int buses_config_done;

    /* Registered busses */
    TAILQ_HEAD(,cam_eb) xpt_busses;
    u_int bus_generation;

    struct intr_config_hook *xpt_config_hook;

    int boot_delay;
    struct callout boot_callout;

    struct mtx xpt_topo_lock;
    struct mtx xpt_lock;
};

typedef enum {
    DM_RET_COPY        = 0x01,
    DM_RET_FLAG_MASK   = 0x0f,
    DM_RET_NONE        = 0x00,
    DM_RET_STOP        = 0x10,
    DM_RET_DESCEND     = 0x20,
    DM_RET_ERROR       = 0x30,
    DM_RET_ACTION_MASK = 0xf0
} dev_match_ret;

typedef enum {
    XPT_DEPTH_BUS,
    XPT_DEPTH_TARGET,
    XPT_DEPTH_DEVICE,
    XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
    xpt_traverse_depth  depth;
    void               *tr_func;
    void               *tr_arg;
};

typedef int    xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int    xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int    xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int    xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int    xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

TUNABLE_INT("kern.cam.boot_delay", &xsoftc.boot_delay);
SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
    &xsoftc.boot_delay, 0, "Bus registration wait time");

/* Queues for our software interrupt handler */
typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
static cam_simq_t cam_simq;
static struct mtx cam_simq_lock;

/* Pointers to software interrupt handlers */
static void *cambio_ih;

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
    xpt_periph_init, "xpt",
    TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
    CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;

static struct cdevsw xpt_cdevsw = {
    .d_version = D_VERSION,
    .d_flags   = 0,
    .d_open    = xptopen,
    .d_close   = xptclose,
    .d_ioctl   = xptioctl,
    .d_name    = "xpt",
};

/* Storage for debugging data structures */
#ifdef CAMDEBUG
struct cam_path *cam_dpath;
#ifdef CAM_DEBUG_FLAGS
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
#else
u_int32_t cam_dflags = CAM_DEBUG_NONE;
#endif
TUNABLE_INT("kern.cam.dflags", &cam_dflags);
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RW,
    &cam_dflags, 0, "Cam Debug Flags");
u_int32_t cam_debug_delay;
TUNABLE_INT("kern.cam.debug_delay", &cam_debug_delay);
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RW,
    &cam_debug_delay, 0, "Cam Debug Delay");
#endif

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
    "cam",
    cam_module_event_handler,
    NULL
};

static int    xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void     xpt_async_bcast(struct async_list *async_head,
                                u_int32_t async_code,
                                struct cam_path *path,
                                void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_ed *device);
static void      xpt_run_dev_allocq(struct cam_eb *bus);
static void      xpt_run_dev_sendq(struct cam_eb *bus);
static timeout_t xpt_release_devq_timeout;
static void      xpt_release_simq_timeout(void *arg) __unused;
static void      xpt_release_bus(struct cam_eb *bus);
static void      xpt_release_devq_device(struct cam_ed *dev, cam_rl rl,
                     u_int count, int run_queue);
static struct cam_et*
                 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void      xpt_release_target(struct cam_et *target);
static struct cam_eb*
                 xpt_find_bus(path_id_t path_id);
static struct cam_et*
                 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
                 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void      xpt_config(void *arg);
static xpt_devicefunc_t xptpassannouncefunc;
static void      xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void      xptpoll(struct cam_sim *sim);
static void      camisr(void *);
static void      camisr_runqueue(void *);
static dev_match_ret    xptbusmatch(struct dev_match_pattern *patterns,
                            u_int num_patterns, struct cam_eb *bus);
static dev_match_ret    xptdevicematch(struct dev_match_pattern *patterns,
                            u_int num_patterns,
                            struct cam_ed *device);
static dev_match_ret    xptperiphmatch(struct dev_match_pattern *patterns,
                            u_int num_patterns,
                            struct cam_periph *periph);
static xpt_busfunc_t    xptedtbusfunc;
static xpt_targetfunc_t xptedttargetfunc;
static xpt_devicefunc_t xptedtdevicefunc;
static xpt_periphfunc_t xptedtperiphfunc;
static xpt_pdrvfunc_t   xptplistpdrvfunc;
static xpt_periphfunc_t xptplistperiphfunc;
static int      xptedtmatch(struct ccb_dev_match *cdm);
static int      xptperiphlistmatch(struct ccb_dev_match *cdm);
static int      xptbustraverse(struct cam_eb *start_bus,
                    xpt_busfunc_t *tr_func, void *arg);
static int      xpttargettraverse(struct cam_eb *bus,
                    struct cam_et *start_target,
                    xpt_targetfunc_t *tr_func, void *arg);
static int      xptdevicetraverse(struct cam_et *target,
                    struct cam_ed *start_device,
                    xpt_devicefunc_t *tr_func, void *arg);
static int      xptperiphtraverse(struct cam_ed *device,
                    struct cam_periph *start_periph,
                    xpt_periphfunc_t *tr_func, void *arg);
static int      xptpdrvtraverse(struct periph_driver **start_pdrv,
                    xpt_pdrvfunc_t *tr_func, void *arg);
static int      xptpdperiphtraverse(struct periph_driver **pdrv,
                    struct cam_periph *start_periph,
                    xpt_periphfunc_t *tr_func,
                    void *arg);
static xpt_busfunc_t    xptdefbusfunc;
static xpt_targetfunc_t xptdeftargetfunc;
static xpt_devicefunc_t xptdefdevicefunc;
static xpt_periphfunc_t xptdefperiphfunc;
static void     xpt_finishconfig_task(void *context, int pending);
static void     xpt_dev_async_default(u_int32_t async_code,
                    struct cam_eb *bus,
                    struct cam_et *target,
                    struct cam_ed *device,
                    void *async_arg);
static struct cam_ed *  xpt_alloc_device_default(struct cam_eb *bus,
                    struct cam_et *target,
                    lun_id_t lun_id);
static xpt_devicefunc_t xptsetasyncfunc;
static xpt_busfunc_t    xptsetasyncbusfunc;
static cam_status       xptregister(struct cam_periph *periph,
                            void *arg);
static __inline int periph_is_queued(struct cam_periph *periph);
static __inline int device_is_alloc_queued(struct cam_ed *device);
static __inline int device_is_send_queued(struct cam_ed *device);
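/*
 * Overview of the two scheduling helpers below (illustrative summary, not
 * taken verbatim from the original sources): a bus's SIM carries two device
 * queues.  The alloc_queue orders devices whose peripheral drivers are
 * waiting for CCBs, keyed by the highest-priority entry on the device's
 * drvq; the send_queue orders devices with CCBs waiting for controller
 * openings, keyed by the highest-priority CCB on the device's ccbq.  A
 * sketch of the usual calling pattern (hypothetical caller shown only for
 * orientation):
 *
 *    if (xpt_schedule_dev_allocq(bus, dev) != 0)
 *        xpt_run_dev_allocq(bus);
 */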
static __inline int
xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
{
    int retval;

    if ((dev->drvq.entries > 0) &&
        (dev->ccbq.devq_openings > 0) &&
        (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL(
        CAMQ_GET_PRIO(&dev->drvq))) == 0)) {
        /*
         * The priority of a device waiting for CCB resources
         * is that of the highest priority peripheral driver
         * enqueued.
         */
        retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
                                  &dev->alloc_ccb_entry.pinfo,
                                  CAMQ_GET_PRIO(&dev->drvq));
    } else {
        retval = 0;
    }

    return (retval);
}

static __inline int
xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
{
    int retval;

    if ((dev->ccbq.queue.entries > 0) &&
        (dev->ccbq.dev_openings > 0) &&
        (cam_ccbq_frozen_top(&dev->ccbq) == 0)) {
        /*
         * The priority of a device waiting for controller
         * resources is that of the highest priority CCB
         * enqueued.
         */
        retval =
            xpt_schedule_dev(&bus->sim->devq->send_queue,
                             &dev->send_ccb_entry.pinfo,
                             CAMQ_GET_PRIO(&dev->ccbq.queue));
    } else {
        retval = 0;
    }
    return (retval);
}

static __inline int
periph_is_queued(struct cam_periph *periph)
{
    return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_alloc_queued(struct cam_ed *device)
{
    return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static __inline int
device_is_send_queued(struct cam_ed *device)
{
    return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
    make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static void
xptdone(struct cam_periph *periph, union ccb *done_ccb)
{
    /* Caller will release the CCB */
    wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

    /*
     * Only allow read-write access.
     */
    if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
        return(EPERM);

    /*
     * We don't allow nonblocking access.
     */
    if ((flags & O_NONBLOCK) != 0) {
        printf("%s: can't do nonblocking access\n", devtoname(dev));
        return(ENODEV);
    }

    /* Mark ourselves open */
    mtx_lock(&xsoftc.xpt_lock);
    xsoftc.flags |= XPT_FLAG_OPEN;
    mtx_unlock(&xsoftc.xpt_lock);

    return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

    /* Mark ourselves closed */
    mtx_lock(&xsoftc.xpt_lock);
    xsoftc.flags &= ~XPT_FLAG_OPEN;
    mtx_unlock(&xsoftc.xpt_lock);

    return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
    int error;

    error = 0;

    switch(cmd) {
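    /*
     * Illustrative note (not from the original sources): userland reaches
     * these cases through the xpt control device; camcontrol(8), for
     * example, issues CAMIOCOMMAND with an XPT_DEV_MATCH CCB to enumerate
     * the EDT and CAMGETPASSTHRU to find the pass(4) instance for a
     * peripheral.  A hypothetical user-space sketch:
     *
     *    int fd = open("/dev/xpt0", O_RDWR);
     *    union ccb ccb;
     *
     *    bzero(&ccb, sizeof(ccb));
     *    ccb.ccb_h.func_code = XPT_DEV_MATCH;
     *    ... fill in match patterns and result buffer ...
     *    ioctl(fd, CAMIOCOMMAND, &ccb);
     */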
    /*
     * For the transport layer CAMIOCOMMAND ioctl, we really only want
     * to accept CCB types that don't quite make sense to send through a
     * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
     * in the CAM spec.
     */
    case CAMIOCOMMAND: {
        union ccb *ccb;
        union ccb *inccb;
        struct cam_eb *bus;

        inccb = (union ccb *)addr;

        bus = xpt_find_bus(inccb->ccb_h.path_id);
        if (bus == NULL)
            return (EINVAL);

        switch (inccb->ccb_h.func_code) {
        case XPT_SCAN_BUS:
        case XPT_RESET_BUS:
            if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
                inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                xpt_release_bus(bus);
                return (EINVAL);
            }
            break;
        case XPT_SCAN_TGT:
            if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
                inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
                xpt_release_bus(bus);
                return (EINVAL);
            }
            break;
        default:
            break;
        }

        switch(inccb->ccb_h.func_code) {
        case XPT_SCAN_BUS:
        case XPT_RESET_BUS:
        case XPT_PATH_INQ:
        case XPT_ENG_INQ:
        case XPT_SCAN_LUN:
        case XPT_SCAN_TGT:

            ccb = xpt_alloc_ccb();

            CAM_SIM_LOCK(bus->sim);

            /*
             * Create a path using the bus, target, and lun the
             * user passed in.
             */
            if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
                                inccb->ccb_h.path_id,
                                inccb->ccb_h.target_id,
                                inccb->ccb_h.target_lun) !=
                                CAM_REQ_CMP){
                error = EINVAL;
                CAM_SIM_UNLOCK(bus->sim);
                xpt_free_ccb(ccb);
                break;
            }
            /* Ensure all of our fields are correct */
            xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
                          inccb->ccb_h.pinfo.priority);
            xpt_merge_ccb(ccb, inccb);
            ccb->ccb_h.cbfcnp = xptdone;
            cam_periph_runccb(ccb, NULL, 0, 0, NULL);
            bcopy(ccb, inccb, sizeof(union ccb));
            xpt_free_path(ccb->ccb_h.path);
            xpt_free_ccb(ccb);
            CAM_SIM_UNLOCK(bus->sim);
            break;

        case XPT_DEBUG: {
            union ccb ccb;

            /*
             * This is an immediate CCB, so it's okay to
             * allocate it on the stack.
             */

            CAM_SIM_LOCK(bus->sim);

            /*
             * Create a path using the bus, target, and lun the
             * user passed in.
             */
            if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
                                inccb->ccb_h.path_id,
                                inccb->ccb_h.target_id,
                                inccb->ccb_h.target_lun) !=
                                CAM_REQ_CMP){
                error = EINVAL;
                CAM_SIM_UNLOCK(bus->sim);
                break;
            }
            /* Ensure all of our fields are correct */
            xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
                          inccb->ccb_h.pinfo.priority);
            xpt_merge_ccb(&ccb, inccb);
            ccb.ccb_h.cbfcnp = xptdone;
            xpt_action(&ccb);
            CAM_SIM_UNLOCK(bus->sim);
            bcopy(&ccb, inccb, sizeof(union ccb));
            xpt_free_path(ccb.ccb_h.path);
            break;

        }
        case XPT_DEV_MATCH: {
            struct cam_periph_map_info mapinfo;
            struct cam_path *old_path;

            /*
             * We can't deal with physical addresses for this
             * type of transaction.
             */
            if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
                error = EINVAL;
                break;
            }

            /*
             * Save this in case the caller had it set to
             * something in particular.
             */
            old_path = inccb->ccb_h.path;

            /*
             * We really don't need a path for the matching
             * code.  The path is needed because of the
             * debugging statements in xpt_action().  They
             * assume that the CCB has a valid path.
             */
            inccb->ccb_h.path = xpt_periph->path;

            bzero(&mapinfo, sizeof(mapinfo));

            /*
             * Map the pattern and match buffers into kernel
             * virtual address space.
             */
            error = cam_periph_mapmem(inccb, &mapinfo);

            if (error) {
                inccb->ccb_h.path = old_path;
                break;
            }

            /*
             * This is an immediate CCB, we can send it on directly.
             */
            xpt_action(inccb);

            /*
             * Map the buffers back into user space.
             */
            cam_periph_unmapmem(inccb, &mapinfo);

            inccb->ccb_h.path = old_path;

            error = 0;
            break;
        }
        default:
            error = ENOTSUP;
            break;
        }
        xpt_release_bus(bus);
        break;
    }
    /*
     * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input,
     * with the peripheral driver name and unit name filled in.  The other
     * fields don't really matter as input.  The passthrough driver name
     * ("pass"), and unit number are passed back in the ccb.  The current
     * device generation number, and the index into the device peripheral
     * driver list, and the status are also passed back.  Note that
     * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
     * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
     * (or rather should be) impossible for the device peripheral driver
     * list to change since we look at the whole thing in one pass, and
     * we do it with lock protection.
     *
     */
    case CAMGETPASSTHRU: {
        union ccb *ccb;
        struct cam_periph *periph;
        struct periph_driver **p_drv;
        char   *name;
        u_int unit;
        u_int cur_generation;
        int base_periph_found;
        int splbreaknum;

        ccb = (union ccb *)addr;
        unit = ccb->cgdl.unit_number;
        name = ccb->cgdl.periph_name;
        /*
         * Every 100 devices, we want to drop our lock protection to
         * give the software interrupt handler a chance to run.
         * Most systems won't run into this check, but this should
         * avoid starvation in the software interrupt handler in
         * large systems.
         */
        splbreaknum = 100;

        ccb = (union ccb *)addr;

        base_periph_found = 0;

        /*
         * Sanity check -- make sure we don't get a null peripheral
         * driver name.
         */
        if (*ccb->cgdl.periph_name == '\0') {
            error = EINVAL;
            break;
        }

        /* Keep the list from changing while we traverse it */
        mtx_lock(&xsoftc.xpt_topo_lock);
ptstartover:
        cur_generation = xsoftc.xpt_generation;

        /* first find our driver in the list of drivers */
        for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
            if (strcmp((*p_drv)->driver_name, name) == 0)
                break;

        if (*p_drv == NULL) {
            mtx_unlock(&xsoftc.xpt_topo_lock);
            ccb->ccb_h.status = CAM_REQ_CMP_ERR;
            ccb->cgdl.status = CAM_GDEVLIST_ERROR;
            *ccb->cgdl.periph_name = '\0';
            ccb->cgdl.unit_number = 0;
            error = ENOENT;
            break;
        }

        /*
         * Run through every peripheral instance of this driver
         * and check to see whether it matches the unit passed
         * in by the user.  If it does, get out of the loops and
         * find the passthrough driver associated with that
         * peripheral driver.
         */
        for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
             periph = TAILQ_NEXT(periph, unit_links)) {

            if (periph->unit_number == unit) {
                break;
            } else if (--splbreaknum == 0) {
                mtx_unlock(&xsoftc.xpt_topo_lock);
                mtx_lock(&xsoftc.xpt_topo_lock);
                splbreaknum = 100;
                if (cur_generation != xsoftc.xpt_generation)
                    goto ptstartover;
            }
        }
        /*
         * If we found the peripheral driver that the user passed
         * in, go through all of the peripheral drivers for that
         * particular device and look for a passthrough driver.
         */
        if (periph != NULL) {
            struct cam_ed *device;
            int i;

            base_periph_found = 1;
            device = periph->path->device;
            for (i = 0, periph = SLIST_FIRST(&device->periphs);
                 periph != NULL;
                 periph = SLIST_NEXT(periph, periph_links), i++) {
                /*
                 * Check to see whether we have a
                 * passthrough device or not.
                 */
                if (strcmp(periph->periph_name, "pass") == 0) {
                    /*
                     * Fill in the getdevlist fields.
                     */
                    strcpy(ccb->cgdl.periph_name,
                           periph->periph_name);
                    ccb->cgdl.unit_number =
                        periph->unit_number;
                    if (SLIST_NEXT(periph, periph_links))
                        ccb->cgdl.status =
                            CAM_GDEVLIST_MORE_DEVS;
                    else
                        ccb->cgdl.status =
                            CAM_GDEVLIST_LAST_DEVICE;
                    ccb->cgdl.generation =
                        device->generation;
                    ccb->cgdl.index = i;
                    /*
                     * Fill in some CCB header fields
                     * that the user may want.
                     */
                    ccb->ccb_h.path_id =
                        periph->path->bus->path_id;
                    ccb->ccb_h.target_id =
                        periph->path->target->target_id;
                    ccb->ccb_h.target_lun =
                        periph->path->device->lun_id;
                    ccb->ccb_h.status = CAM_REQ_CMP;
                    break;
                }
            }
        }

        /*
         * If the periph is null here, one of two things has
         * happened.  The first possibility is that we couldn't
         * find the unit number of the particular peripheral driver
         * that the user is asking about.  e.g. the user asks for
         * the passthrough driver for "da11".  We find the list of
         * "da" peripherals all right, but there is no unit 11.
         * The other possibility is that we went through the list
         * of peripheral drivers attached to the device structure,
         * but didn't find one with the name "pass".  Either way,
         * we return ENOENT, since we couldn't find something.
         */
        if (periph == NULL) {
            ccb->ccb_h.status = CAM_REQ_CMP_ERR;
            ccb->cgdl.status = CAM_GDEVLIST_ERROR;
            *ccb->cgdl.periph_name = '\0';
            ccb->cgdl.unit_number = 0;
            error = ENOENT;
            /*
             * It is unfortunate that this is even necessary,
             * but there are many, many clueless users out there.
             * If this is true, the user is looking for the
             * passthrough driver, but doesn't have one in his
             * kernel.
             */
            if (base_periph_found == 1) {
                printf("xptioctl: pass driver is not in the "
                       "kernel\n");
                printf("xptioctl: put \"device pass\" in "
                       "your kernel config file\n");
            }
        }
        mtx_unlock(&xsoftc.xpt_topo_lock);
        break;
        }
    default:
        error = ENOTTY;
        break;
    }

    return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
    int error;

    switch (what) {
    case MOD_LOAD:
        if ((error = xpt_init(NULL)) != 0)
            return (error);
        break;
    case MOD_UNLOAD:
        return EBUSY;
    default:
        return EOPNOTSUPP;
    }

    return 0;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

    if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
        xpt_free_path(done_ccb->ccb_h.path);
        xpt_free_ccb(done_ccb);
    } else {
        done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
        (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
    }
    xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
    union ccb      *ccb;
    struct cam_sim *sim;

    xpt_lock_buses();
    for (;;) {
        if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
            msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
                   "ccb_scanq", 0);
        if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
            TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
            xpt_unlock_buses();

            sim = ccb->ccb_h.path->bus->sim;
            CAM_SIM_LOCK(sim);
            xpt_action(ccb);
            CAM_SIM_UNLOCK(sim);

            xpt_lock_buses();
        }
    }
}

void
xpt_rescan(union ccb *ccb)
{
    struct ccb_hdr *hdr;

    /* Prepare request */
    if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
        ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
        ccb->ccb_h.func_code = XPT_SCAN_BUS;
    else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
        ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
        ccb->ccb_h.func_code = XPT_SCAN_TGT;
    else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
        ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
        ccb->ccb_h.func_code = XPT_SCAN_LUN;
    else {
        xpt_print(ccb->ccb_h.path, "illegal scan path\n");
        xpt_free_path(ccb->ccb_h.path);
        xpt_free_ccb(ccb);
        return;
    }
    ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
    ccb->ccb_h.cbfcnp = xpt_rescan_done;
    xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
    /* Don't make duplicate entries for the same paths. */
    xpt_lock_buses();
    if (ccb->ccb_h.ppriv_ptr1 == NULL) {
        TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
            if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
                wakeup(&xsoftc.ccb_scanq);
                xpt_unlock_buses();
                xpt_print(ccb->ccb_h.path, "rescan already queued\n");
                xpt_free_path(ccb->ccb_h.path);
                xpt_free_ccb(ccb);
                return;
            }
        }
    }
    TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
    xsoftc.buses_to_config++;
    wakeup(&xsoftc.ccb_scanq);
    xpt_unlock_buses();
}
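/*
 * Sketch of how a consumer typically hands a rescan to the scanner thread
 * (illustrative only; the actual callers live elsewhere in CAM).  The caller
 * allocates the CCB and path, and xpt_rescan() takes ownership of both, so
 * they are freed on error or on completion of the scan:
 *
 *    union ccb *ccb = xpt_alloc_ccb_nowait();
 *
 *    if (ccb != NULL &&
 *        xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
 *        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) == CAM_REQ_CMP)
 *            xpt_rescan(ccb);
 */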
/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
    struct cam_sim *xpt_sim;
    struct cam_path *path;
    struct cam_devq *devq;
    cam_status status;

    TAILQ_INIT(&xsoftc.xpt_busses);
    TAILQ_INIT(&cam_simq);
    TAILQ_INIT(&xsoftc.ccb_scanq);
    STAILQ_INIT(&xsoftc.highpowerq);
    xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

    mtx_init(&cam_simq_lock, "CAM SIMQ lock", NULL, MTX_DEF);
    mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
    mtx_init(&xsoftc.xpt_topo_lock, "XPT topology lock", NULL, MTX_DEF);

    /*
     * The xpt layer is, itself, the equivalent of a SIM.
     * Allow 16 ccbs in the ccb pool for it.  This should
     * give decent parallelism when we probe busses and
     * perform other XPT functions.
     */
    devq = cam_simq_alloc(16);
    xpt_sim = cam_sim_alloc(xptaction,
                            xptpoll,
                            "xpt",
                            /*softc*/NULL,
                            /*unit*/0,
                            /*mtx*/&xsoftc.xpt_lock,
                            /*max_dev_transactions*/0,
                            /*max_tagged_dev_transactions*/0,
                            devq);
    if (xpt_sim == NULL)
        return (ENOMEM);

    mtx_lock(&xsoftc.xpt_lock);
    if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
        mtx_unlock(&xsoftc.xpt_lock);
        printf("xpt_init: xpt_bus_register failed with status %#x,"
               " failing attach\n", status);
        return (EINVAL);
    }

    /*
     * Looking at the XPT from the SIM layer, the XPT is
     * the equivalent of a peripheral driver.  Allocate
     * a peripheral driver entry for us.
     */
    if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
                                  CAM_TARGET_WILDCARD,
                                  CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
        mtx_unlock(&xsoftc.xpt_lock);
        printf("xpt_init: xpt_create_path failed with status %#x,"
               " failing attach\n", status);
        return (EINVAL);
    }

    cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
                     path, NULL, 0, xpt_sim);
    xpt_free_path(path);
    mtx_unlock(&xsoftc.xpt_lock);
    /* Install our software interrupt handlers */
    swi_add(NULL, "cambio", camisr, NULL, SWI_CAMBIO, INTR_MPSAFE, &cambio_ih);
    /*
     * Register a callback for when interrupts are enabled.
     */
    xsoftc.xpt_config_hook =
        (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
                                          M_CAMXPT, M_NOWAIT | M_ZERO);
    if (xsoftc.xpt_config_hook == NULL) {
        printf("xpt_init: Cannot malloc config hook "
               "- failing attach\n");
        return (ENOMEM);
    }
    xsoftc.xpt_config_hook->ich_func = xpt_config;
    if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
        free (xsoftc.xpt_config_hook, M_CAMXPT);
        printf("xpt_init: config_intrhook_establish failed "
               "- failing attach\n");
    }

    return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
    struct cam_sim *xpt_sim;

    if (periph == NULL) {
        printf("xptregister: periph was NULL!!\n");
        return(CAM_REQ_CMP_ERR);
    }

    xpt_sim = (struct cam_sim *)arg;
    xpt_sim->softc = periph;
    xpt_periph = periph;
    periph->softc = NULL;

    return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
    struct cam_ed *device;
    int32_t status;
    struct periph_list *periph_head;

    mtx_assert(periph->sim->mtx, MA_OWNED);

    device = periph->path->device;

    periph_head = &device->periphs;

    status = CAM_REQ_CMP;

    if (device != NULL) {
        /*
         * Make room for this peripheral
         * so it will fit in the queue
         * when it's scheduled to run
         */
        status = camq_resize(&device->drvq,
                             device->drvq.array_size + 1);

        device->generation++;

        SLIST_INSERT_HEAD(periph_head, periph, periph_links);
    }

    mtx_lock(&xsoftc.xpt_topo_lock);
    xsoftc.xpt_generation++;
    mtx_unlock(&xsoftc.xpt_topo_lock);

    return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
    struct cam_ed *device;

    mtx_assert(periph->sim->mtx, MA_OWNED);

    device = periph->path->device;

    if (device != NULL) {
        struct periph_list *periph_head;

        periph_head = &device->periphs;

        /* Release the slot for this peripheral */
        camq_resize(&device->drvq, device->drvq.array_size - 1);

        device->generation++;

        SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
    }

    mtx_lock(&xsoftc.xpt_topo_lock);
    xsoftc.xpt_generation++;
    mtx_unlock(&xsoftc.xpt_topo_lock);
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
    struct cam_path *path = periph->path;

    mtx_assert(periph->sim->mtx, MA_OWNED);

    printf("%s%d at %s%d bus %d scbus%d target %d lun %d\n",
           periph->periph_name, periph->unit_number,
           path->bus->sim->sim_name,
           path->bus->sim->unit_number,
           path->bus->sim->bus_id,
           path->bus->path_id,
           path->target->target_id,
           path->device->lun_id);
    printf("%s%d: ", periph->periph_name, periph->unit_number);
    if (path->device->protocol == PROTO_SCSI)
        scsi_print_inquiry(&path->device->inq_data);
    else if (path->device->protocol == PROTO_ATA ||
        path->device->protocol == PROTO_SATAPM)
        ata_print_ident(&path->device->ident_data);
    else if (path->device->protocol == PROTO_SEMB)
        semb_print_ident(
            (struct sep_identify_data *)&path->device->ident_data);
    else
        printf("Unknown protocol device\n");
    if (bootverbose && path->device->serial_num_len > 0) {
        /* Don't wrap the screen - print only the first 60 chars */
        printf("%s%d: Serial Number %.60s\n", periph->periph_name,
               periph->unit_number, path->device->serial_num);
    }
    /* Announce transport details. */
    (*(path->bus->xport->announce))(periph);
    /* Announce command queueing. */
    if (path->device->inq_flags & SID_CmdQue
     || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
        printf("%s%d: Command Queueing enabled\n",
               periph->periph_name, periph->unit_number);
    }
    /* Announce caller's details if they've passed in. */
    if (announce_string != NULL)
        printf("%s%d: %s\n", periph->periph_name,
               periph->unit_number, announce_string);
}

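/*
 * xpt_getattr() below is the hook GEOM-facing code uses to pull the
 * "GEOM::ident" (serial number) and "GEOM::physpath" attributes out of a
 * device via an advanced-info CCB.  A minimal illustrative call (buffer
 * name and size here are hypothetical):
 *
 *    char ident[100];
 *
 *    if (xpt_getattr(ident, sizeof(ident), "GEOM::ident", path) == 0)
 *        printf("ident: %s\n", ident);
 */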
int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
    int ret = -1;
    struct ccb_dev_advinfo cdai;

    memset(&cdai, 0, sizeof(cdai));
    xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
    cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
    cdai.bufsiz = len;

    if (!strcmp(attr, "GEOM::ident"))
        cdai.buftype = CDAI_TYPE_SERIAL_NUM;
    else if (!strcmp(attr, "GEOM::physpath"))
        cdai.buftype = CDAI_TYPE_PHYS_PATH;
    else
        goto out;

    cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
    if (cdai.buf == NULL) {
        ret = ENOMEM;
        goto out;
    }
    xpt_action((union ccb *)&cdai); /* can only be synchronous */
    if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
        cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
    if (cdai.provsiz == 0)
        goto out;
    ret = 0;
    if (strlcpy(buf, cdai.buf, len) >= len)
        ret = EFAULT;

out:
    if (cdai.buf != NULL)
        free(cdai.buf, M_CAMXPT);
    return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
            struct cam_eb *bus)
{
    dev_match_ret retval;
    int i;

    retval = DM_RET_NONE;

    /*
     * If we aren't given something to match against, that's an error.
     */
    if (bus == NULL)
        return(DM_RET_ERROR);

    /*
     * If there are no match entries, then this bus matches no
     * matter what.
     */
    if ((patterns == NULL) || (num_patterns == 0))
        return(DM_RET_DESCEND | DM_RET_COPY);

    for (i = 0; i < num_patterns; i++) {
        struct bus_match_pattern *cur_pattern;

        /*
         * If the pattern in question isn't for a bus node, we
         * aren't interested.  However, we do indicate to the
         * calling routine that we should continue descending the
         * tree, since the user wants to match against lower-level
         * EDT elements.
         */
        if (patterns[i].type != DEV_MATCH_BUS) {
            if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
                retval |= DM_RET_DESCEND;
            continue;
        }

        cur_pattern = &patterns[i].pattern.bus_pattern;

        /*
         * If they want to match any bus node, we give them any
         * device node.
         */
        if (cur_pattern->flags == BUS_MATCH_ANY) {
            /* set the copy flag */
            retval |= DM_RET_COPY;

            /*
             * If we've already decided on an action, go ahead
             * and return.
             */
            if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
                return(retval);
        }

        /*
         * Not sure why someone would do this...
         */
        if (cur_pattern->flags == BUS_MATCH_NONE)
            continue;

        if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
         && (cur_pattern->path_id != bus->path_id))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
         && (cur_pattern->bus_id != bus->sim->bus_id))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
         && (cur_pattern->unit_number != bus->sim->unit_number))
            continue;

        if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
         && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
                     DEV_IDLEN) != 0))
            continue;

        /*
         * If we get to this point, the user definitely wants
         * information on this bus.  So tell the caller to copy the
         * data out.
         */
        retval |= DM_RET_COPY;

        /*
         * If the return action has been set to descend, then we
         * know that we've already seen a non-bus matching
         * expression, therefore we need to further descend the tree.
         * This won't change by continuing around the loop, so we
         * go ahead and return.  If we haven't seen a non-bus
         * matching expression, we keep going around the loop until
         * we exhaust the matching expressions.  We'll set the stop
         * flag once we fall out of the loop.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
            return(retval);
    }

    /*
     * If the return action hasn't been set to descend yet, that means
     * we haven't seen anything other than bus matching patterns.  So
     * tell the caller to stop descending the tree -- the user doesn't
     * want to match against lower level tree elements.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
        retval |= DM_RET_STOP;

    return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
               struct cam_ed *device)
{
    dev_match_ret retval;
    int i;

    retval = DM_RET_NONE;

    /*
     * If we aren't given something to match against, that's an error.
     */
    if (device == NULL)
        return(DM_RET_ERROR);

    /*
     * If there are no match entries, then this device matches no
     * matter what.
     */
    if ((patterns == NULL) || (num_patterns == 0))
        return(DM_RET_DESCEND | DM_RET_COPY);

    for (i = 0; i < num_patterns; i++) {
        struct device_match_pattern *cur_pattern;
        struct scsi_vpd_device_id *device_id_page;

        /*
         * If the pattern in question isn't for a device node, we
         * aren't interested.
         */
        if (patterns[i].type != DEV_MATCH_DEVICE) {
            if ((patterns[i].type == DEV_MATCH_PERIPH)
             && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
                retval |= DM_RET_DESCEND;
            continue;
        }

        cur_pattern = &patterns[i].pattern.device_pattern;

        /* Error out if mutually exclusive options are specified. */
        if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
         == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
            return(DM_RET_ERROR);

        /*
         * If they want to match any device node, we give them any
         * device node.
         */
        if (cur_pattern->flags == DEV_MATCH_ANY)
            goto copy_dev_node;

        /*
         * Not sure why someone would do this...
         */
        if (cur_pattern->flags == DEV_MATCH_NONE)
            continue;

        if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
         && (cur_pattern->path_id != device->target->bus->path_id))
            continue;

        if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
         && (cur_pattern->target_id != device->target->target_id))
            continue;

        if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
         && (cur_pattern->target_lun != device->lun_id))
            continue;

        if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
         && (cam_quirkmatch((caddr_t)&device->inq_data,
                            (caddr_t)&cur_pattern->data.inq_pat,
                            1, sizeof(cur_pattern->data.inq_pat),
                            scsi_static_inquiry_match) == NULL))
            continue;

        device_id_page = (struct scsi_vpd_device_id *)device->device_id;
        if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
         && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
          || scsi_devid_match((uint8_t *)device_id_page->desc_list,
                              device->device_id_len
                            - SVPD_DEVICE_ID_HDR_LEN,
                              cur_pattern->data.devid_pat.id,
                              cur_pattern->data.devid_pat.id_len) != 0))
            continue;

copy_dev_node:
        /*
         * If we get to this point, the user definitely wants
         * information on this device.  So tell the caller to copy
         * the data out.
         */
        retval |= DM_RET_COPY;

        /*
         * If the return action has been set to descend, then we
         * know that we've already seen a peripheral matching
         * expression, therefore we need to further descend the tree.
         * This won't change by continuing around the loop, so we
         * go ahead and return.  If we haven't seen a peripheral
         * matching expression, we keep going around the loop until
         * we exhaust the matching expressions.  We'll set the stop
         * flag once we fall out of the loop.
         */
        if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
            return(retval);
    }

    /*
     * If the return action hasn't been set to descend yet, that means
     * we haven't seen any peripheral matching patterns.  So tell the
     * caller to stop descending the tree -- the user doesn't want to
     * match against lower level tree elements.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
        retval |= DM_RET_STOP;

    return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
               struct cam_periph *periph)
{
    dev_match_ret retval;
    int i;

    /*
     * If we aren't given something to match against, that's an error.
     */
    if (periph == NULL)
        return(DM_RET_ERROR);

    /*
     * If there are no match entries, then this peripheral matches no
     * matter what.
     */
    if ((patterns == NULL) || (num_patterns == 0))
        return(DM_RET_STOP | DM_RET_COPY);

    /*
     * There aren't any nodes below a peripheral node, so there's no
     * reason to descend the tree any further.
     */
    retval = DM_RET_STOP;

    for (i = 0; i < num_patterns; i++) {
        struct periph_match_pattern *cur_pattern;

        /*
         * If the pattern in question isn't for a peripheral, we
         * aren't interested.
         */
        if (patterns[i].type != DEV_MATCH_PERIPH)
            continue;

        cur_pattern = &patterns[i].pattern.periph_pattern;

        /*
         * If they want to match on anything, then we will do so.
         */
        if (cur_pattern->flags == PERIPH_MATCH_ANY) {
            /* set the copy flag */
            retval |= DM_RET_COPY;

            /*
             * We've already set the return action to stop,
             * since there are no nodes below peripherals in
             * the tree.
             */
            return(retval);
        }

        /*
         * Not sure why someone would do this...
         */
        if (cur_pattern->flags == PERIPH_MATCH_NONE)
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
         && (cur_pattern->path_id != periph->path->bus->path_id))
            continue;

        /*
         * For the target and lun id's, we have to make sure the
         * target and lun pointers aren't NULL.  The xpt peripheral
         * has a wildcard target and device.
         */
        if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
         && ((periph->path->target == NULL)
         ||(cur_pattern->target_id != periph->path->target->target_id)))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
         && ((periph->path->device == NULL)
         || (cur_pattern->target_lun != periph->path->device->lun_id)))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
         && (cur_pattern->unit_number != periph->unit_number))
            continue;

        if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
         && (strncmp(cur_pattern->periph_name, periph->periph_name,
                     DEV_IDLEN) != 0))
            continue;

        /*
         * If we get to this point, the user definitely wants
         * information on this peripheral.  So tell the caller to
         * copy the data out.
         */
        retval |= DM_RET_COPY;

        /*
         * The return action has already been set to stop, since
         * peripherals don't have any nodes below them in the EDT.
         */
        return(retval);
    }

    /*
     * If we get to this point, the peripheral that was passed in
     * doesn't match any of the patterns.
     */
    return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
    struct ccb_dev_match *cdm;
    dev_match_ret retval;

    cdm = (struct ccb_dev_match *)arg;

    /*
     * If our position is for something deeper in the tree, that means
     * that we've already seen this node.  So, we keep going down.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target != NULL))
        retval = DM_RET_DESCEND;
    else
        retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

    /*
     * If we got an error, bail out of the search.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
        cdm->status = CAM_DEV_MATCH_ERROR;
        return(0);
    }

    /*
     * If the copy flag is set, copy this bus out.
     */
    if (retval & DM_RET_COPY) {
        int spaceleft, j;

        spaceleft = cdm->match_buf_len - (cdm->num_matches *
            sizeof(struct dev_match_result));

        /*
         * If we don't have enough space to put in another
         * match result, save our position and tell the
         * user there are more devices to check.
         */
        if (spaceleft < sizeof(struct dev_match_result)) {
            bzero(&cdm->pos, sizeof(cdm->pos));
            cdm->pos.position_type =
                CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

            cdm->pos.cookie.bus = bus;
            cdm->pos.generations[CAM_BUS_GENERATION]=
                xsoftc.bus_generation;
            cdm->status = CAM_DEV_MATCH_MORE;
            return(0);
        }
        j = cdm->num_matches;
        cdm->num_matches++;
        cdm->matches[j].type = DEV_MATCH_BUS;
        cdm->matches[j].result.bus_result.path_id = bus->path_id;
        cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
        cdm->matches[j].result.bus_result.unit_number =
            bus->sim->unit_number;
        strncpy(cdm->matches[j].result.bus_result.dev_name,
            bus->sim->sim_name, DEV_IDLEN);
    }

    /*
     * If the user is only interested in busses, there's no
     * reason to descend to the next level in the tree.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
        return(1);

    /*
     * If there is a target generation recorded, check it to
     * make sure the target list hasn't changed.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (bus == cdm->pos.cookie.bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
     && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
         bus->generation)) {
        cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
        return(0);
    }

    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target != NULL))
        return(xpttargettraverse(bus,
                    (struct cam_et *)cdm->pos.cookie.target,
                    xptedttargetfunc, arg));
    else
        return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
    struct ccb_dev_match *cdm;

    cdm = (struct ccb_dev_match *)arg;

    /*
     * If there is a device list generation recorded, check it to
     * make sure the device list hasn't changed.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == target->bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target == target)
     && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
     && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
     && (cdm->pos.generations[CAM_DEV_GENERATION] !=
         target->generation)) {
        cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
        return(0);
    }

    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == target->bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target == target)
     && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
     && (cdm->pos.cookie.device != NULL))
        return(xptdevicetraverse(target,
                    (struct cam_ed *)cdm->pos.cookie.device,
                    xptedtdevicefunc, arg));
    else
        return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{

    struct ccb_dev_match *cdm;
    dev_match_ret retval;

    cdm = (struct ccb_dev_match *)arg;

    /*
     * If our position is for something deeper in the tree, that means
     * that we've already seen this node.  So, we keep going down.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
     && (cdm->pos.cookie.device == device)
     && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
     && (cdm->pos.cookie.periph != NULL))
        retval = DM_RET_DESCEND;
    else
        retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
                                device);

    if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
        cdm->status = CAM_DEV_MATCH_ERROR;
        return(0);
    }

    /*
     * If the copy flag is set, copy this device out.
     */
    if (retval & DM_RET_COPY) {
        int spaceleft, j;

        spaceleft = cdm->match_buf_len - (cdm->num_matches *
            sizeof(struct dev_match_result));

        /*
         * If we don't have enough space to put in another
         * match result, save our position and tell the
         * user there are more devices to check.
         */
        if (spaceleft < sizeof(struct dev_match_result)) {
            bzero(&cdm->pos, sizeof(cdm->pos));
            cdm->pos.position_type =
                CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
                CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

            cdm->pos.cookie.bus = device->target->bus;
            cdm->pos.generations[CAM_BUS_GENERATION]=
                xsoftc.bus_generation;
            cdm->pos.cookie.target = device->target;
            cdm->pos.generations[CAM_TARGET_GENERATION] =
                device->target->bus->generation;
            cdm->pos.cookie.device = device;
            cdm->pos.generations[CAM_DEV_GENERATION] =
                device->target->generation;
            cdm->status = CAM_DEV_MATCH_MORE;
            return(0);
        }
        j = cdm->num_matches;
        cdm->num_matches++;
        cdm->matches[j].type = DEV_MATCH_DEVICE;
        cdm->matches[j].result.device_result.path_id =
            device->target->bus->path_id;
        cdm->matches[j].result.device_result.target_id =
            device->target->target_id;
        cdm->matches[j].result.device_result.target_lun =
            device->lun_id;
        cdm->matches[j].result.device_result.protocol =
            device->protocol;
        bcopy(&device->inq_data,
              &cdm->matches[j].result.device_result.inq_data,
              sizeof(struct scsi_inquiry_data));
        bcopy(&device->ident_data,
              &cdm->matches[j].result.device_result.ident_data,
              sizeof(struct ata_params));

        /* Let the user know whether this device is unconfigured */
        if (device->flags & CAM_DEV_UNCONFIGURED)
            cdm->matches[j].result.device_result.flags =
                DEV_RESULT_UNCONFIGURED;
        else
            cdm->matches[j].result.device_result.flags =
                DEV_RESULT_NOFLAG;
    }

    /*
     * If the user isn't interested in peripherals, don't descend
     * the tree any further.
     */
    if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
        return(1);

    /*
     * If there is a peripheral list generation recorded, make sure
     * it hasn't changed.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (device->target->bus == cdm->pos.cookie.bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (device->target == cdm->pos.cookie.target)
     && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
     && (device == cdm->pos.cookie.device)
     && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
     && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
     && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
         device->generation)){
        cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
        return(0);
    }

    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus == device->target->bus)
     && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
     && (cdm->pos.cookie.target == device->target)
     && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
     && (cdm->pos.cookie.device == device)
     && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
     && (cdm->pos.cookie.periph != NULL))
        return(xptperiphtraverse(device,
                (struct cam_periph *)cdm->pos.cookie.periph,
                xptedtperiphfunc, arg));
    else
        return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
    struct ccb_dev_match *cdm;
    dev_match_ret retval;

    cdm = (struct ccb_dev_match *)arg;

    retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

    if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
        cdm->status = CAM_DEV_MATCH_ERROR;
        return(0);
    }

    /*
     * If the copy flag is set, copy this peripheral out.
     */
    if (retval & DM_RET_COPY) {
        int spaceleft, j;

        spaceleft = cdm->match_buf_len - (cdm->num_matches *
            sizeof(struct dev_match_result));

        /*
         * If we don't have enough space to put in another
         * match result, save our position and tell the
         * user there are more devices to check.
         */
        if (spaceleft < sizeof(struct dev_match_result)) {
            bzero(&cdm->pos, sizeof(cdm->pos));
            cdm->pos.position_type =
                CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
                CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
                CAM_DEV_POS_PERIPH;

            cdm->pos.cookie.bus = periph->path->bus;
            cdm->pos.generations[CAM_BUS_GENERATION]=
                xsoftc.bus_generation;
            cdm->pos.cookie.target = periph->path->target;
            cdm->pos.generations[CAM_TARGET_GENERATION] =
                periph->path->bus->generation;
            cdm->pos.cookie.device = periph->path->device;
            cdm->pos.generations[CAM_DEV_GENERATION] =
                periph->path->target->generation;
            cdm->pos.cookie.periph = periph;
            cdm->pos.generations[CAM_PERIPH_GENERATION] =
                periph->path->device->generation;
            cdm->status = CAM_DEV_MATCH_MORE;
            return(0);
        }

        j = cdm->num_matches;
        cdm->num_matches++;
        cdm->matches[j].type = DEV_MATCH_PERIPH;
        cdm->matches[j].result.periph_result.path_id =
            periph->path->bus->path_id;
        cdm->matches[j].result.periph_result.target_id =
            periph->path->target->target_id;
        cdm->matches[j].result.periph_result.target_lun =
            periph->path->device->lun_id;
        cdm->matches[j].result.periph_result.unit_number =
            periph->unit_number;
        strncpy(cdm->matches[j].result.periph_result.periph_name,
            periph->periph_name, DEV_IDLEN);
    }

    return(1);
}

static int
xptedtmatch(struct ccb_dev_match *cdm)
{
    int ret;

    cdm->num_matches = 0;

    /*
     * Check the bus list generation.  If it has changed, the user
     * needs to reset everything and start over.
     */
    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
     && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
        cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
        return(0);
    }

    if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
     && (cdm->pos.cookie.bus != NULL))
        ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
                             xptedtbusfunc, cdm);
    else
        ret = xptbustraverse(NULL, xptedtbusfunc, cdm);

    /*
     * If we get back 0, that means that we had to stop before fully
     * traversing the EDT.  It also means that one of the subroutines
     * has set the status field to the proper value.  If we get back 1,
     * we've fully traversed the EDT and copied out any matching entries.
     */
    if (ret == 1)
        cdm->status = CAM_DEV_MATCH_LAST;

    return(ret);
}

static int
xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
{
    struct ccb_dev_match *cdm;

    cdm = (struct ccb_dev_match *)arg;

    if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
     && (cdm->pos.cookie.pdrv == pdrv)
     && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
     && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
     && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
         (*pdrv)->generation)) {
        cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
        return(0);
    }

    if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
     && (cdm->pos.cookie.pdrv == pdrv)
     && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
     && (cdm->pos.cookie.periph != NULL))
        return(xptpdperiphtraverse(pdrv,
                (struct cam_periph *)cdm->pos.cookie.periph,
                xptplistperiphfunc, arg));
    else
        return(xptpdperiphtraverse(pdrv, NULL, xptplistperiphfunc, arg));
}

static int
xptplistperiphfunc(struct cam_periph *periph, void *arg)
{
    struct ccb_dev_match *cdm;
    dev_match_ret retval;

    cdm = (struct ccb_dev_match *)arg;

    retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

    if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
        cdm->status = CAM_DEV_MATCH_ERROR;
        return(0);
    }

    /*
     * If the copy flag is set, copy this peripheral out.
     */
    if (retval & DM_RET_COPY) {
        int spaceleft, j;

        spaceleft = cdm->match_buf_len - (cdm->num_matches *
            sizeof(struct dev_match_result));

        /*
         * If we don't have enough space to put in another
         * match result, save our position and tell the
         * user there are more devices to check.
         */
        if (spaceleft < sizeof(struct dev_match_result)) {
            struct periph_driver **pdrv;

            pdrv = NULL;
            bzero(&cdm->pos, sizeof(cdm->pos));
            cdm->pos.position_type =
                CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
                CAM_DEV_POS_PERIPH;

            /*
             * This may look a bit non-sensical, but it is
             * actually quite logical.  There are very few
             * peripheral drivers, and bloating every peripheral
             * structure with a pointer back to its parent
             * peripheral driver linker set entry would cost
             * more in the long run than doing this quick lookup.
             */
            for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
                if (strcmp((*pdrv)->driver_name,
                    periph->periph_name) == 0)
                    break;
            }

            if (*pdrv == NULL) {
                cdm->status = CAM_DEV_MATCH_ERROR;
                return(0);
            }

            cdm->pos.cookie.pdrv = pdrv;
            /*
             * The periph generation slot does double duty, as
             * does the periph pointer slot.  They are used for
             * both edt and pdrv lookups and positioning.
             */
            cdm->pos.cookie.periph = periph;
            cdm->pos.generations[CAM_PERIPH_GENERATION] =
                (*pdrv)->generation;
            cdm->status = CAM_DEV_MATCH_MORE;
            return(0);
        }

        j = cdm->num_matches;
        cdm->num_matches++;
        cdm->matches[j].type = DEV_MATCH_PERIPH;
        cdm->matches[j].result.periph_result.path_id =
            periph->path->bus->path_id;

        /*
         * The transport layer peripheral doesn't have a target or
         * lun.
1960 */ 1961 if (periph->path->target) 1962 cdm->matches[j].result.periph_result.target_id = 1963 periph->path->target->target_id; 1964 else 1965 cdm->matches[j].result.periph_result.target_id = -1; 1966 1967 if (periph->path->device) 1968 cdm->matches[j].result.periph_result.target_lun = 1969 periph->path->device->lun_id; 1970 else 1971 cdm->matches[j].result.periph_result.target_lun = -1; 1972 1973 cdm->matches[j].result.periph_result.unit_number = 1974 periph->unit_number; 1975 strncpy(cdm->matches[j].result.periph_result.periph_name, 1976 periph->periph_name, DEV_IDLEN); 1977 } 1978 1979 return(1); 1980 } 1981 1982 static int 1983 xptperiphlistmatch(struct ccb_dev_match *cdm) 1984 { 1985 int ret; 1986 1987 cdm->num_matches = 0; 1988 1989 /* 1990 * At this point in the edt traversal function, we check the bus 1991 * list generation to make sure that no busses have been added or 1992 * removed since the user last sent a XPT_DEV_MATCH ccb through. 1993 * For the peripheral driver list traversal function, however, we 1994 * don't have to worry about new peripheral driver types coming or 1995 * going; they're in a linker set, and therefore can't change 1996 * without a recompile. 1997 */ 1998 1999 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2000 && (cdm->pos.cookie.pdrv != NULL)) 2001 ret = xptpdrvtraverse( 2002 (struct periph_driver **)cdm->pos.cookie.pdrv, 2003 xptplistpdrvfunc, cdm); 2004 else 2005 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2006 2007 /* 2008 * If we get back 0, that means that we had to stop before fully 2009 * traversing the peripheral driver tree. It also means that one of 2010 * the subroutines has set the status field to the proper value. If 2011 * we get back 1, we've fully traversed the EDT and copied out any 2012 * matching entries. 2013 */ 2014 if (ret == 1) 2015 cdm->status = CAM_DEV_MATCH_LAST; 2016 2017 return(ret); 2018 } 2019 2020 static int 2021 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2022 { 2023 struct cam_eb *bus, *next_bus; 2024 int retval; 2025 2026 retval = 1; 2027 2028 mtx_lock(&xsoftc.xpt_topo_lock); 2029 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses)); 2030 bus != NULL; 2031 bus = next_bus) { 2032 2033 bus->refcount++; 2034 2035 /* 2036 * XXX The locking here is obviously very complex. We 2037 * should work to simplify it. 
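 * The reference taken on the bus above keeps the bus entry valid while the topology lock is dropped so that the SIM lock can be acquired around the callback.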
2038 */ 2039 mtx_unlock(&xsoftc.xpt_topo_lock); 2040 CAM_SIM_LOCK(bus->sim); 2041 retval = tr_func(bus, arg); 2042 CAM_SIM_UNLOCK(bus->sim); 2043 2044 mtx_lock(&xsoftc.xpt_topo_lock); 2045 next_bus = TAILQ_NEXT(bus, links); 2046 mtx_unlock(&xsoftc.xpt_topo_lock); 2047 2048 xpt_release_bus(bus); 2049 2050 if (retval == 0) 2051 return(retval); 2052 mtx_lock(&xsoftc.xpt_topo_lock); 2053 } 2054 mtx_unlock(&xsoftc.xpt_topo_lock); 2055 2056 return(retval); 2057 } 2058 2059 int 2060 xpt_sim_opened(struct cam_sim *sim) 2061 { 2062 struct cam_eb *bus; 2063 struct cam_et *target; 2064 struct cam_ed *device; 2065 struct cam_periph *periph; 2066 2067 KASSERT(sim->refcount >= 1, ("sim->refcount >= 1")); 2068 mtx_assert(sim->mtx, MA_OWNED); 2069 2070 mtx_lock(&xsoftc.xpt_topo_lock); 2071 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) { 2072 if (bus->sim != sim) 2073 continue; 2074 2075 TAILQ_FOREACH(target, &bus->et_entries, links) { 2076 TAILQ_FOREACH(device, &target->ed_entries, links) { 2077 SLIST_FOREACH(periph, &device->periphs, 2078 periph_links) { 2079 if (periph->refcount > 0) { 2080 mtx_unlock(&xsoftc.xpt_topo_lock); 2081 return (1); 2082 } 2083 } 2084 } 2085 } 2086 } 2087 2088 mtx_unlock(&xsoftc.xpt_topo_lock); 2089 return (0); 2090 } 2091 2092 static int 2093 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2094 xpt_targetfunc_t *tr_func, void *arg) 2095 { 2096 struct cam_et *target, *next_target; 2097 int retval; 2098 2099 retval = 1; 2100 for (target = (start_target ? start_target : 2101 TAILQ_FIRST(&bus->et_entries)); 2102 target != NULL; target = next_target) { 2103 2104 target->refcount++; 2105 2106 retval = tr_func(target, arg); 2107 2108 next_target = TAILQ_NEXT(target, links); 2109 2110 xpt_release_target(target); 2111 2112 if (retval == 0) 2113 return(retval); 2114 } 2115 2116 return(retval); 2117 } 2118 2119 static int 2120 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2121 xpt_devicefunc_t *tr_func, void *arg) 2122 { 2123 struct cam_ed *device, *next_device; 2124 int retval; 2125 2126 retval = 1; 2127 for (device = (start_device ? start_device : 2128 TAILQ_FIRST(&target->ed_entries)); 2129 device != NULL; 2130 device = next_device) { 2131 2132 /* 2133 * Hold a reference so the current device does not go away 2134 * on us. 2135 */ 2136 device->refcount++; 2137 2138 retval = tr_func(device, arg); 2139 2140 /* 2141 * Grab our next pointer before we release the current 2142 * device. 2143 */ 2144 next_device = TAILQ_NEXT(device, links); 2145 2146 xpt_release_device(device); 2147 2148 if (retval == 0) 2149 return(retval); 2150 } 2151 2152 return(retval); 2153 } 2154 2155 static int 2156 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2157 xpt_periphfunc_t *tr_func, void *arg) 2158 { 2159 struct cam_periph *periph, *next_periph; 2160 int retval; 2161 2162 retval = 1; 2163 2164 xpt_lock_buses(); 2165 for (periph = (start_periph ? start_periph : 2166 SLIST_FIRST(&device->periphs)); 2167 periph != NULL; 2168 periph = next_periph) { 2169 2170 2171 /* 2172 * In this case, we want to show peripherals that have been 2173 * invalidated, but not peripherals that are scheduled to 2174 * be freed. So instead of calling cam_periph_acquire(), 2175 * which will fail if the periph has been invalidated, we 2176 * just check for the free flag here. If it is free, we 2177 * skip to the next periph. 
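 * A reference is taken below instead, which keeps the periph from being freed while the traversal function is called with the bus list unlocked.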
2178 */ 2179 if (periph->flags & CAM_PERIPH_FREE) { 2180 next_periph = SLIST_NEXT(periph, periph_links); 2181 continue; 2182 } 2183 2184 /* 2185 * Acquire a reference to this periph while we call the 2186 * traversal function, so it can't go away. 2187 */ 2188 periph->refcount++; 2189 2190 xpt_unlock_buses(); 2191 2192 retval = tr_func(periph, arg); 2193 2194 /* 2195 * We need the lock for list traversal. 2196 */ 2197 xpt_lock_buses(); 2198 2199 /* 2200 * Grab the next peripheral before we release this one, so 2201 * our next pointer is still valid. 2202 */ 2203 next_periph = SLIST_NEXT(periph, periph_links); 2204 2205 cam_periph_release_locked_buses(periph); 2206 2207 if (retval == 0) 2208 goto bailout_done; 2209 } 2210 2211 bailout_done: 2212 2213 xpt_unlock_buses(); 2214 2215 return(retval); 2216 } 2217 2218 static int 2219 xptpdrvtraverse(struct periph_driver **start_pdrv, 2220 xpt_pdrvfunc_t *tr_func, void *arg) 2221 { 2222 struct periph_driver **pdrv; 2223 int retval; 2224 2225 retval = 1; 2226 2227 /* 2228 * We don't traverse the peripheral driver list like we do the 2229 * other lists, because it is a linker set, and therefore cannot be 2230 * changed during runtime. If the peripheral driver list is ever 2231 * re-done to be something other than a linker set (i.e. it can 2232 * change while the system is running), the list traversal should 2233 * be modified to work like the other traversal functions. 2234 */ 2235 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2236 *pdrv != NULL; pdrv++) { 2237 retval = tr_func(pdrv, arg); 2238 2239 if (retval == 0) 2240 return(retval); 2241 } 2242 2243 return(retval); 2244 } 2245 2246 static int 2247 xptpdperiphtraverse(struct periph_driver **pdrv, 2248 struct cam_periph *start_periph, 2249 xpt_periphfunc_t *tr_func, void *arg) 2250 { 2251 struct cam_periph *periph, *next_periph; 2252 int retval; 2253 2254 retval = 1; 2255 2256 xpt_lock_buses(); 2257 for (periph = (start_periph ? start_periph : 2258 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL; 2259 periph = next_periph) { 2260 2261 2262 /* 2263 * In this case, we want to show peripherals that have been 2264 * invalidated, but not peripherals that are scheduled to 2265 * be freed. So instead of calling cam_periph_acquire(), 2266 * which will fail if the periph has been invalidated, we 2267 * just check for the free flag here. If it is free, we 2268 * skip to the next periph. 2269 */ 2270 if (periph->flags & CAM_PERIPH_FREE) { 2271 next_periph = TAILQ_NEXT(periph, unit_links); 2272 continue; 2273 } 2274 2275 /* 2276 * Acquire a reference to this periph while we call the 2277 * traversal function, so it can't go away. 2278 */ 2279 periph->refcount++; 2280 2281 /* 2282 * XXX KDM we have the toplogy lock here, but in 2283 * xptperiphtraverse(), we drop it before calling the 2284 * traversal function. Which is correct? 2285 */ 2286 retval = tr_func(periph, arg); 2287 2288 /* 2289 * Grab the next peripheral before we release this one, so 2290 * our next pointer is still valid. 
2291 */ 2292 next_periph = TAILQ_NEXT(periph, unit_links); 2293 2294 cam_periph_release_locked_buses(periph); 2295 2296 if (retval == 0) 2297 goto bailout_done; 2298 } 2299 bailout_done: 2300 2301 xpt_unlock_buses(); 2302 2303 return(retval); 2304 } 2305 2306 static int 2307 xptdefbusfunc(struct cam_eb *bus, void *arg) 2308 { 2309 struct xpt_traverse_config *tr_config; 2310 2311 tr_config = (struct xpt_traverse_config *)arg; 2312 2313 if (tr_config->depth == XPT_DEPTH_BUS) { 2314 xpt_busfunc_t *tr_func; 2315 2316 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2317 2318 return(tr_func(bus, tr_config->tr_arg)); 2319 } else 2320 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2321 } 2322 2323 static int 2324 xptdeftargetfunc(struct cam_et *target, void *arg) 2325 { 2326 struct xpt_traverse_config *tr_config; 2327 2328 tr_config = (struct xpt_traverse_config *)arg; 2329 2330 if (tr_config->depth == XPT_DEPTH_TARGET) { 2331 xpt_targetfunc_t *tr_func; 2332 2333 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2334 2335 return(tr_func(target, tr_config->tr_arg)); 2336 } else 2337 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2338 } 2339 2340 static int 2341 xptdefdevicefunc(struct cam_ed *device, void *arg) 2342 { 2343 struct xpt_traverse_config *tr_config; 2344 2345 tr_config = (struct xpt_traverse_config *)arg; 2346 2347 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2348 xpt_devicefunc_t *tr_func; 2349 2350 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2351 2352 return(tr_func(device, tr_config->tr_arg)); 2353 } else 2354 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2355 } 2356 2357 static int 2358 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2359 { 2360 struct xpt_traverse_config *tr_config; 2361 xpt_periphfunc_t *tr_func; 2362 2363 tr_config = (struct xpt_traverse_config *)arg; 2364 2365 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2366 2367 /* 2368 * Unlike the other default functions, we don't check for depth 2369 * here. The peripheral driver level is the last level in the EDT, 2370 * so if we're here, we should execute the function in question. 2371 */ 2372 return(tr_func(periph, tr_config->tr_arg)); 2373 } 2374 2375 /* 2376 * Execute the given function for every bus in the EDT. 2377 */ 2378 static int 2379 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2380 { 2381 struct xpt_traverse_config tr_config; 2382 2383 tr_config.depth = XPT_DEPTH_BUS; 2384 tr_config.tr_func = tr_func; 2385 tr_config.tr_arg = arg; 2386 2387 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2388 } 2389 2390 /* 2391 * Execute the given function for every device in the EDT. 2392 */ 2393 static int 2394 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2395 { 2396 struct xpt_traverse_config tr_config; 2397 2398 tr_config.depth = XPT_DEPTH_DEVICE; 2399 tr_config.tr_func = tr_func; 2400 tr_config.tr_arg = arg; 2401 2402 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2403 } 2404 2405 static int 2406 xptsetasyncfunc(struct cam_ed *device, void *arg) 2407 { 2408 struct cam_path path; 2409 struct ccb_getdev cgd; 2410 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2411 2412 /* 2413 * Don't report unconfigured devices (Wildcard devs, 2414 * devices only for target mode, device instances 2415 * that have been invalidated but are waiting for 2416 * their last reference count to be released). 
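 * For each remaining configured device an AC_FOUND_DEVICE callback is synthesized, so a client registering for that event also learns about devices that already exist.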
2417 */ 2418 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2419 return (1); 2420 2421 xpt_compile_path(&path, 2422 NULL, 2423 device->target->bus->path_id, 2424 device->target->target_id, 2425 device->lun_id); 2426 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2427 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2428 xpt_action((union ccb *)&cgd); 2429 csa->callback(csa->callback_arg, 2430 AC_FOUND_DEVICE, 2431 &path, &cgd); 2432 xpt_release_path(&path); 2433 2434 return(1); 2435 } 2436 2437 static int 2438 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2439 { 2440 struct cam_path path; 2441 struct ccb_pathinq cpi; 2442 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2443 2444 xpt_compile_path(&path, /*periph*/NULL, 2445 bus->sim->path_id, 2446 CAM_TARGET_WILDCARD, 2447 CAM_LUN_WILDCARD); 2448 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2449 cpi.ccb_h.func_code = XPT_PATH_INQ; 2450 xpt_action((union ccb *)&cpi); 2451 csa->callback(csa->callback_arg, 2452 AC_PATH_REGISTERED, 2453 &path, &cpi); 2454 xpt_release_path(&path); 2455 2456 return(1); 2457 } 2458 2459 void 2460 xpt_action(union ccb *start_ccb) 2461 { 2462 2463 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n")); 2464 2465 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2466 /* Compatibility for RL-unaware code. */ 2467 if (CAM_PRIORITY_TO_RL(start_ccb->ccb_h.pinfo.priority) == 0) 2468 start_ccb->ccb_h.pinfo.priority += CAM_PRIORITY_NORMAL - 1; 2469 (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb); 2470 } 2471 2472 void 2473 xpt_action_default(union ccb *start_ccb) 2474 { 2475 #ifdef CAMDEBUG 2476 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 2477 #endif 2478 struct cam_path *path; 2479 2480 path = start_ccb->ccb_h.path; 2481 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_action_default\n")); 2482 2483 switch (start_ccb->ccb_h.func_code) { 2484 case XPT_SCSI_IO: 2485 { 2486 struct cam_ed *device; 2487 2488 /* 2489 * For the sake of compatibility with SCSI-1 2490 * devices that may not understand the identify 2491 * message, we include lun information in the 2492 * second byte of all commands. SCSI-1 specifies 2493 * that luns are a 3 bit value and reserves only 3 2494 * bits for lun information in the CDB. Later 2495 * revisions of the SCSI spec allow for more than 8 2496 * luns, but have deprecated lun information in the 2497 * CDB. So, if the lun won't fit, we must omit. 2498 * 2499 * Also be aware that during initial probing for devices, 2500 * the inquiry information is unknown but initialized to 0. 2501 * This means that this code will be exercised while probing 2502 * devices with an ANSI revision greater than 2. 2503 */ 2504 device = path->device; 2505 if (device->protocol_version <= SCSI_REV_2 2506 && start_ccb->ccb_h.target_lun < 8 2507 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2508 2509 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2510 start_ccb->ccb_h.target_lun << 5; 2511 } 2512 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2513 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n", 2514 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0], 2515 &path->device->inq_data), 2516 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes, 2517 cdb_str, sizeof(cdb_str)))); 2518 } 2519 /* FALLTHROUGH */ 2520 case XPT_TARGET_IO: 2521 case XPT_CONT_TARGET_IO: 2522 start_ccb->csio.sense_resid = 0; 2523 start_ccb->csio.resid = 0; 2524 /* FALLTHROUGH */ 2525 case XPT_ATA_IO: 2526 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) { 2527 start_ccb->ataio.resid = 0; 2528 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. 
ACB: %s\n", 2529 ata_op_string(&start_ccb->ataio.cmd), 2530 ata_cmd_string(&start_ccb->ataio.cmd, 2531 cdb_str, sizeof(cdb_str)))); 2532 } 2533 /* FALLTHROUGH */ 2534 case XPT_RESET_DEV: 2535 case XPT_ENG_EXEC: 2536 case XPT_SMP_IO: 2537 { 2538 int frozen; 2539 2540 frozen = cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2541 path->device->sim->devq->alloc_openings += frozen; 2542 if (frozen > 0) 2543 xpt_run_dev_allocq(path->bus); 2544 if (xpt_schedule_dev_sendq(path->bus, path->device)) 2545 xpt_run_dev_sendq(path->bus); 2546 break; 2547 } 2548 case XPT_CALC_GEOMETRY: 2549 { 2550 struct cam_sim *sim; 2551 2552 /* Filter out garbage */ 2553 if (start_ccb->ccg.block_size == 0 2554 || start_ccb->ccg.volume_size == 0) { 2555 start_ccb->ccg.cylinders = 0; 2556 start_ccb->ccg.heads = 0; 2557 start_ccb->ccg.secs_per_track = 0; 2558 start_ccb->ccb_h.status = CAM_REQ_CMP; 2559 break; 2560 } 2561 #if defined(PC98) || defined(__sparc64__) 2562 /* 2563 * In a PC-98 system, geometry translation depends on 2564 * the "real" device geometry obtained from mode page 4. 2565 * SCSI geometry translation is performed in the 2566 * initialization routine of the SCSI BIOS and the result 2567 * stored in host memory. If the translation is available 2568 * in host memory, use it. If not, rely on the default 2569 * translation the device driver performs. 2570 * For sparc64, we may need to adjust the geometry of large 2571 * disks in order to fit the limitations of the 16-bit 2572 * fields of the VTOC8 disk label. 2573 */ 2574 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2575 start_ccb->ccb_h.status = CAM_REQ_CMP; 2576 break; 2577 } 2578 #endif 2579 sim = path->bus->sim; 2580 (*(sim->sim_action))(sim, start_ccb); 2581 break; 2582 } 2583 case XPT_ABORT: 2584 { 2585 union ccb* abort_ccb; 2586 2587 abort_ccb = start_ccb->cab.abort_ccb; 2588 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2589 2590 if (abort_ccb->ccb_h.pinfo.index >= 0) { 2591 struct cam_ccbq *ccbq; 2592 struct cam_ed *device; 2593 2594 device = abort_ccb->ccb_h.path->device; 2595 ccbq = &device->ccbq; 2596 device->sim->devq->alloc_openings -= 2597 cam_ccbq_remove_ccb(ccbq, abort_ccb); 2598 abort_ccb->ccb_h.status = 2599 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2600 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2601 xpt_done(abort_ccb); 2602 start_ccb->ccb_h.status = CAM_REQ_CMP; 2603 break; 2604 } 2605 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2606 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2607 /* 2608 * We've caught this ccb en route to 2609 * the SIM. Flag it for abort and the 2610 * SIM will do so just before starting 2611 * real work on the CCB. 2612 */ 2613 abort_ccb->ccb_h.status = 2614 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2615 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2616 start_ccb->ccb_h.status = CAM_REQ_CMP; 2617 break; 2618 } 2619 } 2620 if (XPT_FC_IS_QUEUED(abort_ccb) 2621 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2622 /* 2623 * It's already completed but waiting 2624 * for our SWI to get to it. 2625 */ 2626 start_ccb->ccb_h.status = CAM_UA_ABORT; 2627 break; 2628 } 2629 /* 2630 * If we weren't able to take care of the abort request 2631 * in the XPT, pass the request down to the SIM for processing.
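 * The FALLTHROUGH below hands the CCB to the SIM through the same sim_action dispatch used for the other immediate CCB types.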
2632 */ 2633 } 2634 /* FALLTHROUGH */ 2635 case XPT_ACCEPT_TARGET_IO: 2636 case XPT_EN_LUN: 2637 case XPT_IMMED_NOTIFY: 2638 case XPT_NOTIFY_ACK: 2639 case XPT_RESET_BUS: 2640 case XPT_IMMEDIATE_NOTIFY: 2641 case XPT_NOTIFY_ACKNOWLEDGE: 2642 case XPT_GET_SIM_KNOB: 2643 case XPT_SET_SIM_KNOB: 2644 { 2645 struct cam_sim *sim; 2646 2647 sim = path->bus->sim; 2648 (*(sim->sim_action))(sim, start_ccb); 2649 break; 2650 } 2651 case XPT_PATH_INQ: 2652 { 2653 struct cam_sim *sim; 2654 2655 sim = path->bus->sim; 2656 (*(sim->sim_action))(sim, start_ccb); 2657 break; 2658 } 2659 case XPT_PATH_STATS: 2660 start_ccb->cpis.last_reset = path->bus->last_reset; 2661 start_ccb->ccb_h.status = CAM_REQ_CMP; 2662 break; 2663 case XPT_GDEV_TYPE: 2664 { 2665 struct cam_ed *dev; 2666 2667 dev = path->device; 2668 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2669 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2670 } else { 2671 struct ccb_getdev *cgd; 2672 2673 cgd = &start_ccb->cgd; 2674 cgd->protocol = dev->protocol; 2675 cgd->inq_data = dev->inq_data; 2676 cgd->ident_data = dev->ident_data; 2677 cgd->inq_flags = dev->inq_flags; 2678 cgd->ccb_h.status = CAM_REQ_CMP; 2679 cgd->serial_num_len = dev->serial_num_len; 2680 if ((dev->serial_num_len > 0) 2681 && (dev->serial_num != NULL)) 2682 bcopy(dev->serial_num, cgd->serial_num, 2683 dev->serial_num_len); 2684 } 2685 break; 2686 } 2687 case XPT_GDEV_STATS: 2688 { 2689 struct cam_ed *dev; 2690 2691 dev = path->device; 2692 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2693 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2694 } else { 2695 struct ccb_getdevstats *cgds; 2696 struct cam_eb *bus; 2697 struct cam_et *tar; 2698 2699 cgds = &start_ccb->cgds; 2700 bus = path->bus; 2701 tar = path->target; 2702 cgds->dev_openings = dev->ccbq.dev_openings; 2703 cgds->dev_active = dev->ccbq.dev_active; 2704 cgds->devq_openings = dev->ccbq.devq_openings; 2705 cgds->devq_queued = dev->ccbq.queue.entries; 2706 cgds->held = dev->ccbq.held; 2707 cgds->last_reset = tar->last_reset; 2708 cgds->maxtags = dev->maxtags; 2709 cgds->mintags = dev->mintags; 2710 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2711 cgds->last_reset = bus->last_reset; 2712 cgds->ccb_h.status = CAM_REQ_CMP; 2713 } 2714 break; 2715 } 2716 case XPT_GDEVLIST: 2717 { 2718 struct cam_periph *nperiph; 2719 struct periph_list *periph_head; 2720 struct ccb_getdevlist *cgdl; 2721 u_int i; 2722 struct cam_ed *device; 2723 int found; 2724 2725 2726 found = 0; 2727 2728 /* 2729 * Don't want anyone mucking with our data. 2730 */ 2731 device = path->device; 2732 periph_head = &device->periphs; 2733 cgdl = &start_ccb->cgdl; 2734 2735 /* 2736 * Check and see if the list has changed since the user 2737 * last requested a list member. If so, tell them that the 2738 * list has changed, and therefore they need to start over 2739 * from the beginning. 2740 */ 2741 if ((cgdl->index != 0) && 2742 (cgdl->generation != device->generation)) { 2743 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2744 break; 2745 } 2746 2747 /* 2748 * Traverse the list of peripherals and attempt to find 2749 * the requested peripheral. 
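 * cgdl->index selects which entry to return; entries are matched by position in the list rather than by name.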
2750 */ 2751 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2752 (nperiph != NULL) && (i <= cgdl->index); 2753 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2754 if (i == cgdl->index) { 2755 strncpy(cgdl->periph_name, 2756 nperiph->periph_name, 2757 DEV_IDLEN); 2758 cgdl->unit_number = nperiph->unit_number; 2759 found = 1; 2760 } 2761 } 2762 if (found == 0) { 2763 cgdl->status = CAM_GDEVLIST_ERROR; 2764 break; 2765 } 2766 2767 if (nperiph == NULL) 2768 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2769 else 2770 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2771 2772 cgdl->index++; 2773 cgdl->generation = device->generation; 2774 2775 cgdl->ccb_h.status = CAM_REQ_CMP; 2776 break; 2777 } 2778 case XPT_DEV_MATCH: 2779 { 2780 dev_pos_type position_type; 2781 struct ccb_dev_match *cdm; 2782 2783 cdm = &start_ccb->cdm; 2784 2785 /* 2786 * There are two ways of getting at information in the EDT. 2787 * The first way is via the primary EDT tree. It starts 2788 * with a list of busses, then a list of targets on a bus, 2789 * then devices/luns on a target, and then peripherals on a 2790 * device/lun. The "other" way is by the peripheral driver 2791 * lists. The peripheral driver lists are organized by 2792 * peripheral driver. (obviously) So it makes sense to 2793 * use the peripheral driver list if the user is looking 2794 * for something like "da1", or all "da" devices. If the 2795 * user is looking for something on a particular bus/target 2796 * or lun, it's generally better to go through the EDT tree. 2797 */ 2798 2799 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2800 position_type = cdm->pos.position_type; 2801 else { 2802 u_int i; 2803 2804 position_type = CAM_DEV_POS_NONE; 2805 2806 for (i = 0; i < cdm->num_patterns; i++) { 2807 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2808 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2809 position_type = CAM_DEV_POS_EDT; 2810 break; 2811 } 2812 } 2813 2814 if (cdm->num_patterns == 0) 2815 position_type = CAM_DEV_POS_EDT; 2816 else if (position_type == CAM_DEV_POS_NONE) 2817 position_type = CAM_DEV_POS_PDRV; 2818 } 2819 2820 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2821 case CAM_DEV_POS_EDT: 2822 xptedtmatch(cdm); 2823 break; 2824 case CAM_DEV_POS_PDRV: 2825 xptperiphlistmatch(cdm); 2826 break; 2827 default: 2828 cdm->status = CAM_DEV_MATCH_ERROR; 2829 break; 2830 } 2831 2832 if (cdm->status == CAM_DEV_MATCH_ERROR) 2833 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2834 else 2835 start_ccb->ccb_h.status = CAM_REQ_CMP; 2836 2837 break; 2838 } 2839 case XPT_SASYNC_CB: 2840 { 2841 struct ccb_setasync *csa; 2842 struct async_node *cur_entry; 2843 struct async_list *async_head; 2844 u_int32_t added; 2845 2846 csa = &start_ccb->csa; 2847 added = csa->event_enable; 2848 async_head = &path->device->asyncs; 2849 2850 /* 2851 * If there is already an entry for us, simply 2852 * update it. 2853 */ 2854 cur_entry = SLIST_FIRST(async_head); 2855 while (cur_entry != NULL) { 2856 if ((cur_entry->callback_arg == csa->callback_arg) 2857 && (cur_entry->callback == csa->callback)) 2858 break; 2859 cur_entry = SLIST_NEXT(cur_entry, links); 2860 } 2861 2862 if (cur_entry != NULL) { 2863 /* 2864 * If the request has no flags set, 2865 * remove the entry. 
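 * Otherwise the entry is updated in place, and 'added' is reduced to the events that were not already enabled so the caller can see what actually changed.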
2866 */ 2867 added &= ~cur_entry->event_enable; 2868 if (csa->event_enable == 0) { 2869 SLIST_REMOVE(async_head, cur_entry, 2870 async_node, links); 2871 xpt_release_device(path->device); 2872 free(cur_entry, M_CAMXPT); 2873 } else { 2874 cur_entry->event_enable = csa->event_enable; 2875 } 2876 csa->event_enable = added; 2877 } else { 2878 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2879 M_NOWAIT); 2880 if (cur_entry == NULL) { 2881 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2882 break; 2883 } 2884 cur_entry->event_enable = csa->event_enable; 2885 cur_entry->callback_arg = csa->callback_arg; 2886 cur_entry->callback = csa->callback; 2887 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2888 xpt_acquire_device(path->device); 2889 } 2890 start_ccb->ccb_h.status = CAM_REQ_CMP; 2891 break; 2892 } 2893 case XPT_REL_SIMQ: 2894 { 2895 struct ccb_relsim *crs; 2896 struct cam_ed *dev; 2897 2898 crs = &start_ccb->crs; 2899 dev = path->device; 2900 if (dev == NULL) { 2901 2902 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2903 break; 2904 } 2905 2906 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2907 2908 /* Don't ever go below one opening */ 2909 if (crs->openings > 0) { 2910 xpt_dev_ccbq_resize(path, crs->openings); 2911 if (bootverbose) { 2912 xpt_print(path, 2913 "number of openings is now %d\n", 2914 crs->openings); 2915 } 2916 } 2917 } 2918 2919 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2920 2921 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2922 2923 /* 2924 * Just extend the old timeout and decrement 2925 * the freeze count so that a single timeout 2926 * is sufficient for releasing the queue. 2927 */ 2928 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2929 callout_stop(&dev->callout); 2930 } else { 2931 2932 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2933 } 2934 2935 callout_reset(&dev->callout, 2936 (crs->release_timeout * hz) / 1000, 2937 xpt_release_devq_timeout, dev); 2938 2939 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2940 2941 } 2942 2943 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2944 2945 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2946 /* 2947 * Decrement the freeze count so that a single 2948 * completion is still sufficient to unfreeze 2949 * the queue. 2950 */ 2951 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2952 } else { 2953 2954 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2955 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2956 } 2957 } 2958 2959 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2960 2961 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2962 || (dev->ccbq.dev_active == 0)) { 2963 2964 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2965 } else { 2966 2967 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2968 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2969 } 2970 } 2971 2972 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) { 2973 xpt_release_devq_rl(path, /*runlevel*/ 2974 (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ? 
2975 crs->release_timeout : 0, 2976 /*count*/1, /*run_queue*/TRUE); 2977 } 2978 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt[0]; 2979 start_ccb->ccb_h.status = CAM_REQ_CMP; 2980 break; 2981 } 2982 case XPT_DEBUG: { 2983 #ifdef CAMDEBUG 2984 #ifdef CAM_DEBUG_DELAY 2985 cam_debug_delay = CAM_DEBUG_DELAY; 2986 #endif 2987 cam_dflags = start_ccb->cdbg.flags; 2988 if (cam_dpath != NULL) { 2989 xpt_free_path(cam_dpath); 2990 cam_dpath = NULL; 2991 } 2992 2993 if (cam_dflags != CAM_DEBUG_NONE) { 2994 if (xpt_create_path(&cam_dpath, xpt_periph, 2995 start_ccb->ccb_h.path_id, 2996 start_ccb->ccb_h.target_id, 2997 start_ccb->ccb_h.target_lun) != 2998 CAM_REQ_CMP) { 2999 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3000 cam_dflags = CAM_DEBUG_NONE; 3001 } else { 3002 start_ccb->ccb_h.status = CAM_REQ_CMP; 3003 xpt_print(cam_dpath, "debugging flags now %x\n", 3004 cam_dflags); 3005 } 3006 } else { 3007 cam_dpath = NULL; 3008 start_ccb->ccb_h.status = CAM_REQ_CMP; 3009 } 3010 #else /* !CAMDEBUG */ 3011 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3012 #endif /* CAMDEBUG */ 3013 break; 3014 } 3015 case XPT_FREEZE_QUEUE: 3016 { 3017 struct ccb_relsim *crs = &start_ccb->crs; 3018 3019 xpt_freeze_devq_rl(path, /*runlevel*/ 3020 (crs->release_flags & RELSIM_RELEASE_RUNLEVEL) ? 3021 crs->release_timeout : 0, /*count*/1); 3022 start_ccb->ccb_h.status = CAM_REQ_CMP; 3023 break; 3024 } 3025 case XPT_NOOP: 3026 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3027 xpt_freeze_devq(path, 1); 3028 start_ccb->ccb_h.status = CAM_REQ_CMP; 3029 break; 3030 default: 3031 case XPT_SDEV_TYPE: 3032 case XPT_TERM_IO: 3033 case XPT_ENG_INQ: 3034 /* XXX Implement */ 3035 printf("%s: CCB type %#x not supported\n", __func__, 3036 start_ccb->ccb_h.func_code); 3037 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3038 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3039 xpt_done(start_ccb); 3040 } 3041 break; 3042 } 3043 } 3044 3045 void 3046 xpt_polled_action(union ccb *start_ccb) 3047 { 3048 u_int32_t timeout; 3049 struct cam_sim *sim; 3050 struct cam_devq *devq; 3051 struct cam_ed *dev; 3052 3053 3054 timeout = start_ccb->ccb_h.timeout * 10; 3055 sim = start_ccb->ccb_h.path->bus->sim; 3056 devq = sim->devq; 3057 dev = start_ccb->ccb_h.path->device; 3058 3059 mtx_assert(sim->mtx, MA_OWNED); 3060 3061 /* Don't use ISR for this SIM while polling. */ 3062 sim->flags |= CAM_SIM_POLLED; 3063 3064 /* 3065 * Steal an opening so that no other queued requests 3066 * can get it before us while we simulate interrupts. 3067 */ 3068 dev->ccbq.devq_openings--; 3069 dev->ccbq.dev_openings--; 3070 3071 while(((devq != NULL && devq->send_openings <= 0) || 3072 dev->ccbq.dev_openings < 0) && (--timeout > 0)) { 3073 DELAY(100); 3074 (*(sim->sim_poll))(sim); 3075 camisr_runqueue(&sim->sim_doneq); 3076 } 3077 3078 dev->ccbq.devq_openings++; 3079 dev->ccbq.dev_openings++; 3080 3081 if (timeout != 0) { 3082 xpt_action(start_ccb); 3083 while(--timeout > 0) { 3084 (*(sim->sim_poll))(sim); 3085 camisr_runqueue(&sim->sim_doneq); 3086 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3087 != CAM_REQ_INPROG) 3088 break; 3089 DELAY(100); 3090 } 3091 if (timeout == 0) { 3092 /* 3093 * XXX Is it worth adding a sim_timeout entry 3094 * point so we can attempt recovery? If 3095 * this is only used for dumps, I don't think 3096 * it is. 3097 */ 3098 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3099 } 3100 } else { 3101 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3102 } 3103 3104 /* We will use CAM ISR for this SIM again. 
*/ 3105 sim->flags &= ~CAM_SIM_POLLED; 3106 } 3107 3108 /* 3109 * Schedule a peripheral driver to receive a ccb when it's 3110 * target device has space for more transactions. 3111 */ 3112 void 3113 xpt_schedule(struct cam_periph *perph, u_int32_t new_priority) 3114 { 3115 struct cam_ed *device; 3116 int runq = 0; 3117 3118 mtx_assert(perph->sim->mtx, MA_OWNED); 3119 3120 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3121 device = perph->path->device; 3122 if (periph_is_queued(perph)) { 3123 /* Simply reorder based on new priority */ 3124 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3125 (" change priority to %d\n", new_priority)); 3126 if (new_priority < perph->pinfo.priority) { 3127 camq_change_priority(&device->drvq, 3128 perph->pinfo.index, 3129 new_priority); 3130 runq = xpt_schedule_dev_allocq(perph->path->bus, device); 3131 } 3132 } else { 3133 /* New entry on the queue */ 3134 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3135 (" added periph to queue\n")); 3136 perph->pinfo.priority = new_priority; 3137 perph->pinfo.generation = ++device->drvq.generation; 3138 camq_insert(&device->drvq, &perph->pinfo); 3139 runq = xpt_schedule_dev_allocq(perph->path->bus, device); 3140 } 3141 if (runq != 0) { 3142 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE, 3143 (" calling xpt_run_devq\n")); 3144 xpt_run_dev_allocq(perph->path->bus); 3145 } 3146 } 3147 3148 3149 /* 3150 * Schedule a device to run on a given queue. 3151 * If the device was inserted as a new entry on the queue, 3152 * return 1 meaning the device queue should be run. If we 3153 * were already queued, implying someone else has already 3154 * started the queue, return 0 so the caller doesn't attempt 3155 * to run the queue. 3156 */ 3157 int 3158 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3159 u_int32_t new_priority) 3160 { 3161 int retval; 3162 u_int32_t old_priority; 3163 3164 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3165 3166 old_priority = pinfo->priority; 3167 3168 /* 3169 * Are we already queued? 
3170 */ 3171 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3172 /* Simply reorder based on new priority */ 3173 if (new_priority < old_priority) { 3174 camq_change_priority(queue, pinfo->index, 3175 new_priority); 3176 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3177 ("changed priority to %d\n", 3178 new_priority)); 3179 retval = 1; 3180 } else 3181 retval = 0; 3182 } else { 3183 /* New entry on the queue */ 3184 if (new_priority < old_priority) 3185 pinfo->priority = new_priority; 3186 3187 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3188 ("Inserting onto queue\n")); 3189 pinfo->generation = ++queue->generation; 3190 camq_insert(queue, pinfo); 3191 retval = 1; 3192 } 3193 return (retval); 3194 } 3195 3196 static void 3197 xpt_run_dev_allocq(struct cam_eb *bus) 3198 { 3199 struct cam_devq *devq; 3200 3201 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n")); 3202 devq = bus->sim->devq; 3203 3204 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3205 (" qfrozen_cnt == 0x%x, entries == %d, " 3206 "openings == %d, active == %d\n", 3207 devq->alloc_queue.qfrozen_cnt[0], 3208 devq->alloc_queue.entries, 3209 devq->alloc_openings, 3210 devq->alloc_active)); 3211 3212 devq->alloc_queue.qfrozen_cnt[0]++; 3213 while ((devq->alloc_queue.entries > 0) 3214 && (devq->alloc_openings > 0) 3215 && (devq->alloc_queue.qfrozen_cnt[0] <= 1)) { 3216 struct cam_ed_qinfo *qinfo; 3217 struct cam_ed *device; 3218 union ccb *work_ccb; 3219 struct cam_periph *drv; 3220 struct camq *drvq; 3221 3222 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue, 3223 CAMQ_HEAD); 3224 device = qinfo->device; 3225 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3226 ("running device %p\n", device)); 3227 3228 drvq = &device->drvq; 3229 3230 #ifdef CAMDEBUG 3231 if (drvq->entries <= 0) { 3232 panic("xpt_run_dev_allocq: " 3233 "Device on queue without any work to do"); 3234 } 3235 #endif 3236 if ((work_ccb = xpt_get_ccb(device)) != NULL) { 3237 devq->alloc_openings--; 3238 devq->alloc_active++; 3239 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD); 3240 xpt_setup_ccb(&work_ccb->ccb_h, drv->path, 3241 drv->pinfo.priority); 3242 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3243 ("calling periph start\n")); 3244 drv->periph_start(drv, work_ccb); 3245 } else { 3246 /* 3247 * Malloc failure in alloc_ccb 3248 */ 3249 /* 3250 * XXX add us to a list to be run from free_ccb 3251 * if we don't have any ccbs active on this 3252 * device queue otherwise we may never get run 3253 * again. 3254 */ 3255 break; 3256 } 3257 3258 /* We may have more work. Attempt to reschedule. 
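 * If the peripheral still has requests queued for this device, this puts the device back on the allocation queue before the loop continues.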
*/ 3259 xpt_schedule_dev_allocq(bus, device); 3260 } 3261 devq->alloc_queue.qfrozen_cnt[0]--; 3262 } 3263 3264 static void 3265 xpt_run_dev_sendq(struct cam_eb *bus) 3266 { 3267 struct cam_devq *devq; 3268 3269 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n")); 3270 3271 devq = bus->sim->devq; 3272 3273 devq->send_queue.qfrozen_cnt[0]++; 3274 while ((devq->send_queue.entries > 0) 3275 && (devq->send_openings > 0) 3276 && (devq->send_queue.qfrozen_cnt[0] <= 1)) { 3277 struct cam_ed_qinfo *qinfo; 3278 struct cam_ed *device; 3279 union ccb *work_ccb; 3280 struct cam_sim *sim; 3281 3282 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue, 3283 CAMQ_HEAD); 3284 device = qinfo->device; 3285 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3286 ("running device %p\n", device)); 3287 3288 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3289 if (work_ccb == NULL) { 3290 printf("device on run queue with no ccbs???\n"); 3291 continue; 3292 } 3293 3294 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3295 3296 mtx_lock(&xsoftc.xpt_lock); 3297 if (xsoftc.num_highpower <= 0) { 3298 /* 3299 * We got a high power command, but we 3300 * don't have any available slots. Freeze 3301 * the device queue until we have a slot 3302 * available. 3303 */ 3304 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3305 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, 3306 &work_ccb->ccb_h, 3307 xpt_links.stqe); 3308 3309 mtx_unlock(&xsoftc.xpt_lock); 3310 continue; 3311 } else { 3312 /* 3313 * Consume a high power slot while 3314 * this ccb runs. 3315 */ 3316 xsoftc.num_highpower--; 3317 } 3318 mtx_unlock(&xsoftc.xpt_lock); 3319 } 3320 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3321 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3322 3323 devq->send_openings--; 3324 devq->send_active++; 3325 3326 xpt_schedule_dev_sendq(bus, device); 3327 3328 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){ 3329 /* 3330 * The client wants to freeze the queue 3331 * after this CCB is sent. 3332 */ 3333 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3334 } 3335 3336 /* In Target mode, the peripheral driver knows best... */ 3337 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3338 if ((device->inq_flags & SID_CmdQue) != 0 3339 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3340 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3341 else 3342 /* 3343 * Clear this in case of a retried CCB that 3344 * failed due to a rejected tag. 3345 */ 3346 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3347 } 3348 3349 /* 3350 * Device queues can be shared among multiple sim instances 3351 * that reside on different busses. Use the SIM in the queue 3352 * CCB's path, rather than the one in the bus that was passed 3353 * into this function. 3354 */ 3355 sim = work_ccb->ccb_h.path->bus->sim; 3356 (*(sim->sim_action))(sim, work_ccb); 3357 } 3358 devq->send_queue.qfrozen_cnt[0]--; 3359 } 3360 3361 /* 3362 * This function merges stuff from the slave ccb into the master ccb, while 3363 * keeping important fields in the master ccb constant. 3364 */ 3365 void 3366 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3367 { 3368 3369 /* 3370 * Pull fields that are valid for peripheral drivers to set 3371 * into the master CCB along with the CCB "payload". 
3372 */ 3373 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3374 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3375 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3376 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3377 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3378 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3379 } 3380 3381 void 3382 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3383 { 3384 3385 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3386 ccb_h->pinfo.priority = priority; 3387 ccb_h->path = path; 3388 ccb_h->path_id = path->bus->path_id; 3389 if (path->target) 3390 ccb_h->target_id = path->target->target_id; 3391 else 3392 ccb_h->target_id = CAM_TARGET_WILDCARD; 3393 if (path->device) { 3394 ccb_h->target_lun = path->device->lun_id; 3395 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3396 } else { 3397 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3398 } 3399 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3400 ccb_h->flags = 0; 3401 } 3402 3403 /* Path manipulation functions */ 3404 cam_status 3405 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3406 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3407 { 3408 struct cam_path *path; 3409 cam_status status; 3410 3411 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT); 3412 3413 if (path == NULL) { 3414 status = CAM_RESRC_UNAVAIL; 3415 return(status); 3416 } 3417 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3418 if (status != CAM_REQ_CMP) { 3419 free(path, M_CAMXPT); 3420 path = NULL; 3421 } 3422 *new_path_ptr = path; 3423 return (status); 3424 } 3425 3426 cam_status 3427 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3428 struct cam_periph *periph, path_id_t path_id, 3429 target_id_t target_id, lun_id_t lun_id) 3430 { 3431 struct cam_path *path; 3432 struct cam_eb *bus = NULL; 3433 cam_status status; 3434 int need_unlock = 0; 3435 3436 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_WAITOK); 3437 3438 if (path_id != CAM_BUS_WILDCARD) { 3439 bus = xpt_find_bus(path_id); 3440 if (bus != NULL) { 3441 need_unlock = 1; 3442 CAM_SIM_LOCK(bus->sim); 3443 } 3444 } 3445 status = xpt_compile_path(path, periph, path_id, target_id, lun_id); 3446 if (need_unlock) { 3447 CAM_SIM_UNLOCK(bus->sim); 3448 xpt_release_bus(bus); 3449 } 3450 if (status != CAM_REQ_CMP) { 3451 free(path, M_CAMXPT); 3452 path = NULL; 3453 } 3454 *new_path_ptr = path; 3455 return (status); 3456 } 3457 3458 cam_status 3459 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3460 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3461 { 3462 struct cam_eb *bus; 3463 struct cam_et *target; 3464 struct cam_ed *device; 3465 cam_status status; 3466 3467 status = CAM_REQ_CMP; /* Completed without error */ 3468 target = NULL; /* Wildcarded */ 3469 device = NULL; /* Wildcarded */ 3470 3471 /* 3472 * We will potentially modify the EDT, so block interrupts 3473 * that may attempt to create cam paths. 
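 * Missing target and device nodes are created on demand; on failure any references acquired along the way are dropped again before returning.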
3474 */ 3475 bus = xpt_find_bus(path_id); 3476 if (bus == NULL) { 3477 status = CAM_PATH_INVALID; 3478 } else { 3479 target = xpt_find_target(bus, target_id); 3480 if (target == NULL) { 3481 /* Create one */ 3482 struct cam_et *new_target; 3483 3484 new_target = xpt_alloc_target(bus, target_id); 3485 if (new_target == NULL) { 3486 status = CAM_RESRC_UNAVAIL; 3487 } else { 3488 target = new_target; 3489 } 3490 } 3491 if (target != NULL) { 3492 device = xpt_find_device(target, lun_id); 3493 if (device == NULL) { 3494 /* Create one */ 3495 struct cam_ed *new_device; 3496 3497 new_device = 3498 (*(bus->xport->alloc_device))(bus, 3499 target, 3500 lun_id); 3501 if (new_device == NULL) { 3502 status = CAM_RESRC_UNAVAIL; 3503 } else { 3504 device = new_device; 3505 } 3506 } 3507 } 3508 } 3509 3510 /* 3511 * Only touch the user's data if we are successful. 3512 */ 3513 if (status == CAM_REQ_CMP) { 3514 new_path->periph = perph; 3515 new_path->bus = bus; 3516 new_path->target = target; 3517 new_path->device = device; 3518 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3519 } else { 3520 if (device != NULL) 3521 xpt_release_device(device); 3522 if (target != NULL) 3523 xpt_release_target(target); 3524 if (bus != NULL) 3525 xpt_release_bus(bus); 3526 } 3527 return (status); 3528 } 3529 3530 void 3531 xpt_release_path(struct cam_path *path) 3532 { 3533 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3534 if (path->device != NULL) { 3535 xpt_release_device(path->device); 3536 path->device = NULL; 3537 } 3538 if (path->target != NULL) { 3539 xpt_release_target(path->target); 3540 path->target = NULL; 3541 } 3542 if (path->bus != NULL) { 3543 xpt_release_bus(path->bus); 3544 path->bus = NULL; 3545 } 3546 } 3547 3548 void 3549 xpt_free_path(struct cam_path *path) 3550 { 3551 3552 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3553 xpt_release_path(path); 3554 free(path, M_CAMXPT); 3555 } 3556 3557 void 3558 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3559 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3560 { 3561 3562 mtx_lock(&xsoftc.xpt_topo_lock); 3563 if (bus_ref) { 3564 if (path->bus) 3565 *bus_ref = path->bus->refcount; 3566 else 3567 *bus_ref = 0; 3568 } 3569 mtx_unlock(&xsoftc.xpt_topo_lock); 3570 if (periph_ref) { 3571 if (path->periph) 3572 *periph_ref = path->periph->refcount; 3573 else 3574 *periph_ref = 0; 3575 } 3576 if (target_ref) { 3577 if (path->target) 3578 *target_ref = path->target->refcount; 3579 else 3580 *target_ref = 0; 3581 } 3582 if (device_ref) { 3583 if (path->device) 3584 *device_ref = path->device->refcount; 3585 else 3586 *device_ref = 0; 3587 } 3588 } 3589 3590 /* 3591 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3592 * in path1, 2 for match with wildcards in path2. 
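 * For example, comparing a path with a wildcard target and lun against a fully specified path on the same bus returns 1.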
3593 */ 3594 int 3595 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3596 { 3597 int retval = 0; 3598 3599 if (path1->bus != path2->bus) { 3600 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3601 retval = 1; 3602 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3603 retval = 2; 3604 else 3605 return (-1); 3606 } 3607 if (path1->target != path2->target) { 3608 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3609 if (retval == 0) 3610 retval = 1; 3611 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3612 retval = 2; 3613 else 3614 return (-1); 3615 } 3616 if (path1->device != path2->device) { 3617 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3618 if (retval == 0) 3619 retval = 1; 3620 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3621 retval = 2; 3622 else 3623 return (-1); 3624 } 3625 return (retval); 3626 } 3627 3628 void 3629 xpt_print_path(struct cam_path *path) 3630 { 3631 3632 if (path == NULL) 3633 printf("(nopath): "); 3634 else { 3635 if (path->periph != NULL) 3636 printf("(%s%d:", path->periph->periph_name, 3637 path->periph->unit_number); 3638 else 3639 printf("(noperiph:"); 3640 3641 if (path->bus != NULL) 3642 printf("%s%d:%d:", path->bus->sim->sim_name, 3643 path->bus->sim->unit_number, 3644 path->bus->sim->bus_id); 3645 else 3646 printf("nobus:"); 3647 3648 if (path->target != NULL) 3649 printf("%d:", path->target->target_id); 3650 else 3651 printf("X:"); 3652 3653 if (path->device != NULL) 3654 printf("%d): ", path->device->lun_id); 3655 else 3656 printf("X): "); 3657 } 3658 } 3659 3660 void 3661 xpt_print(struct cam_path *path, const char *fmt, ...) 3662 { 3663 va_list ap; 3664 xpt_print_path(path); 3665 va_start(ap, fmt); 3666 vprintf(fmt, ap); 3667 va_end(ap); 3668 } 3669 3670 int 3671 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3672 { 3673 struct sbuf sb; 3674 3675 #ifdef INVARIANTS 3676 if (path != NULL && path->bus != NULL) 3677 mtx_assert(path->bus->sim->mtx, MA_OWNED); 3678 #endif 3679 3680 sbuf_new(&sb, str, str_len, 0); 3681 3682 if (path == NULL) 3683 sbuf_printf(&sb, "(nopath): "); 3684 else { 3685 if (path->periph != NULL) 3686 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, 3687 path->periph->unit_number); 3688 else 3689 sbuf_printf(&sb, "(noperiph:"); 3690 3691 if (path->bus != NULL) 3692 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, 3693 path->bus->sim->unit_number, 3694 path->bus->sim->bus_id); 3695 else 3696 sbuf_printf(&sb, "nobus:"); 3697 3698 if (path->target != NULL) 3699 sbuf_printf(&sb, "%d:", path->target->target_id); 3700 else 3701 sbuf_printf(&sb, "X:"); 3702 3703 if (path->device != NULL) 3704 sbuf_printf(&sb, "%d): ", path->device->lun_id); 3705 else 3706 sbuf_printf(&sb, "X): "); 3707 } 3708 sbuf_finish(&sb); 3709 3710 return(sbuf_len(&sb)); 3711 } 3712 3713 path_id_t 3714 xpt_path_path_id(struct cam_path *path) 3715 { 3716 return(path->bus->path_id); 3717 } 3718 3719 target_id_t 3720 xpt_path_target_id(struct cam_path *path) 3721 { 3722 if (path->target != NULL) 3723 return (path->target->target_id); 3724 else 3725 return (CAM_TARGET_WILDCARD); 3726 } 3727 3728 lun_id_t 3729 xpt_path_lun_id(struct cam_path *path) 3730 { 3731 if (path->device != NULL) 3732 return (path->device->lun_id); 3733 else 3734 return (CAM_LUN_WILDCARD); 3735 } 3736 3737 struct cam_sim * 3738 xpt_path_sim(struct cam_path *path) 3739 { 3740 3741 return (path->bus->sim); 3742 } 3743 3744 struct cam_periph* 3745 xpt_path_periph(struct cam_path *path) 3746 { 3747 mtx_assert(path->bus->sim->mtx, 
MA_OWNED); 3748 3749 return (path->periph); 3750 } 3751 3752 int 3753 xpt_path_legacy_ata_id(struct cam_path *path) 3754 { 3755 struct cam_eb *bus; 3756 int bus_id; 3757 3758 if ((strcmp(path->bus->sim->sim_name, "ata") != 0) && 3759 strcmp(path->bus->sim->sim_name, "ahcich") != 0 && 3760 strcmp(path->bus->sim->sim_name, "mvsch") != 0 && 3761 strcmp(path->bus->sim->sim_name, "siisch") != 0) 3762 return (-1); 3763 3764 if (strcmp(path->bus->sim->sim_name, "ata") == 0 && 3765 path->bus->sim->unit_number < 2) { 3766 bus_id = path->bus->sim->unit_number; 3767 } else { 3768 bus_id = 2; 3769 xpt_lock_buses(); 3770 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) { 3771 if (bus == path->bus) 3772 break; 3773 if ((strcmp(bus->sim->sim_name, "ata") == 0 && 3774 bus->sim->unit_number >= 2) || 3775 strcmp(bus->sim->sim_name, "ahcich") == 0 || 3776 strcmp(bus->sim->sim_name, "mvsch") == 0 || 3777 strcmp(bus->sim->sim_name, "siisch") == 0) 3778 bus_id++; 3779 } 3780 xpt_unlock_buses(); 3781 } 3782 if (path->target != NULL) { 3783 if (path->target->target_id < 2) 3784 return (bus_id * 2 + path->target->target_id); 3785 else 3786 return (-1); 3787 } else 3788 return (bus_id * 2); 3789 } 3790 3791 /* 3792 * Release a CAM control block for the caller. Remit the cost of the structure 3793 * to the device referenced by the path. If the this device had no 'credits' 3794 * and peripheral drivers have registered async callbacks for this notification 3795 * call them now. 3796 */ 3797 void 3798 xpt_release_ccb(union ccb *free_ccb) 3799 { 3800 struct cam_path *path; 3801 struct cam_ed *device; 3802 struct cam_eb *bus; 3803 struct cam_sim *sim; 3804 3805 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3806 path = free_ccb->ccb_h.path; 3807 device = path->device; 3808 bus = path->bus; 3809 sim = bus->sim; 3810 3811 mtx_assert(sim->mtx, MA_OWNED); 3812 3813 cam_ccbq_release_opening(&device->ccbq); 3814 if (device->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) { 3815 device->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED; 3816 cam_ccbq_resize(&device->ccbq, 3817 device->ccbq.dev_openings + device->ccbq.dev_active); 3818 } 3819 if (sim->ccb_count > sim->max_ccbs) { 3820 xpt_free_ccb(free_ccb); 3821 sim->ccb_count--; 3822 } else { 3823 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h, 3824 xpt_links.sle); 3825 } 3826 if (sim->devq == NULL) { 3827 return; 3828 } 3829 sim->devq->alloc_openings++; 3830 sim->devq->alloc_active--; 3831 if (device_is_alloc_queued(device) == 0) 3832 xpt_schedule_dev_allocq(bus, device); 3833 xpt_run_dev_allocq(bus); 3834 } 3835 3836 /* Functions accessed by SIM drivers */ 3837 3838 static struct xpt_xport xport_default = { 3839 .alloc_device = xpt_alloc_device_default, 3840 .action = xpt_action_default, 3841 .async = xpt_dev_async_default, 3842 }; 3843 3844 /* 3845 * A sim structure, listing the SIM entry points and instance 3846 * identification info is passed to xpt_bus_register to hook the SIM 3847 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3848 * for this new bus and places it in the array of busses and assigns 3849 * it a path_id. The path_id may be influenced by "hard wiring" 3850 * information specified by the user. Once interrupt services are 3851 * available, the bus will be probed. 
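 *
 * For illustration only, a typical SIM attach sequence looks roughly like
 * the following; the names drv_action, drv_poll, softc and maxio are
 * placeholders and real drivers differ in the details:
 *
 *	devq = cam_simq_alloc(maxio);
 *	sim = cam_sim_alloc(drv_action, drv_poll, "drv", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, maxio, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		cam_sim_free(sim, TRUE);
 *		mtx_unlock(&softc->mtx);
 *		return (ENXIO);
 *	}
 *	mtx_unlock(&softc->mtx);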
3852 */ 3853 int32_t 3854 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3855 { 3856 struct cam_eb *new_bus; 3857 struct cam_eb *old_bus; 3858 struct ccb_pathinq cpi; 3859 struct cam_path *path; 3860 cam_status status; 3861 3862 mtx_assert(sim->mtx, MA_OWNED); 3863 3864 sim->bus_id = bus; 3865 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3866 M_CAMXPT, M_NOWAIT); 3867 if (new_bus == NULL) { 3868 /* Couldn't satisfy request */ 3869 return (CAM_RESRC_UNAVAIL); 3870 } 3871 path = (struct cam_path *)malloc(sizeof(*path), M_CAMXPT, M_NOWAIT); 3872 if (path == NULL) { 3873 free(new_bus, M_CAMXPT); 3874 return (CAM_RESRC_UNAVAIL); 3875 } 3876 3877 if (strcmp(sim->sim_name, "xpt") != 0) { 3878 sim->path_id = 3879 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3880 } 3881 3882 TAILQ_INIT(&new_bus->et_entries); 3883 new_bus->path_id = sim->path_id; 3884 cam_sim_hold(sim); 3885 new_bus->sim = sim; 3886 timevalclear(&new_bus->last_reset); 3887 new_bus->flags = 0; 3888 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3889 new_bus->generation = 0; 3890 3891 mtx_lock(&xsoftc.xpt_topo_lock); 3892 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3893 while (old_bus != NULL 3894 && old_bus->path_id < new_bus->path_id) 3895 old_bus = TAILQ_NEXT(old_bus, links); 3896 if (old_bus != NULL) 3897 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3898 else 3899 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3900 xsoftc.bus_generation++; 3901 mtx_unlock(&xsoftc.xpt_topo_lock); 3902 3903 /* 3904 * Set a default transport so that a PATH_INQ can be issued to 3905 * the SIM. This will then allow for probing and attaching of 3906 * a more appropriate transport. 3907 */ 3908 new_bus->xport = &xport_default; 3909 3910 status = xpt_compile_path(path, /*periph*/NULL, sim->path_id, 3911 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3912 if (status != CAM_REQ_CMP) 3913 printf("xpt_compile_path returned %d\n", status); 3914 3915 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3916 cpi.ccb_h.func_code = XPT_PATH_INQ; 3917 xpt_action((union ccb *)&cpi); 3918 3919 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3920 switch (cpi.transport) { 3921 case XPORT_SPI: 3922 case XPORT_SAS: 3923 case XPORT_FC: 3924 case XPORT_USB: 3925 case XPORT_ISCSI: 3926 case XPORT_PPB: 3927 new_bus->xport = scsi_get_xport(); 3928 break; 3929 case XPORT_ATA: 3930 case XPORT_SATA: 3931 new_bus->xport = ata_get_xport(); 3932 break; 3933 default: 3934 new_bus->xport = &xport_default; 3935 break; 3936 } 3937 } 3938 3939 /* Notify interested parties */ 3940 if (sim->path_id != CAM_XPT_PATH_ID) { 3941 union ccb *scan_ccb; 3942 3943 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3944 /* Initiate bus rescan. */ 3945 scan_ccb = xpt_alloc_ccb_nowait(); 3946 scan_ccb->ccb_h.path = path; 3947 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3948 scan_ccb->crcn.flags = 0; 3949 xpt_rescan(scan_ccb); 3950 } else 3951 xpt_free_path(path); 3952 return (CAM_SUCCESS); 3953 } 3954 3955 int32_t 3956 xpt_bus_deregister(path_id_t pathid) 3957 { 3958 struct cam_path bus_path; 3959 cam_status status; 3960 3961 status = xpt_compile_path(&bus_path, NULL, pathid, 3962 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3963 if (status != CAM_REQ_CMP) 3964 return (status); 3965 3966 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3967 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3968 3969 /* Release the reference count held while registered. 
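 * Dropping this reference allows the bus to be freed once the last path that refers to it has been released.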
*/ 3970 xpt_release_bus(bus_path.bus); 3971 xpt_release_path(&bus_path); 3972 3973 return (CAM_REQ_CMP); 3974 } 3975 3976 static path_id_t 3977 xptnextfreepathid(void) 3978 { 3979 struct cam_eb *bus; 3980 path_id_t pathid; 3981 const char *strval; 3982 3983 pathid = 0; 3984 mtx_lock(&xsoftc.xpt_topo_lock); 3985 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3986 retry: 3987 /* Find an unoccupied pathid */ 3988 while (bus != NULL && bus->path_id <= pathid) { 3989 if (bus->path_id == pathid) 3990 pathid++; 3991 bus = TAILQ_NEXT(bus, links); 3992 } 3993 mtx_unlock(&xsoftc.xpt_topo_lock); 3994 3995 /* 3996 * Ensure that this pathid is not reserved for 3997 * a bus that may be registered in the future. 3998 */ 3999 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4000 ++pathid; 4001 /* Start the search over */ 4002 mtx_lock(&xsoftc.xpt_topo_lock); 4003 goto retry; 4004 } 4005 return (pathid); 4006 } 4007 4008 static path_id_t 4009 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4010 { 4011 path_id_t pathid; 4012 int i, dunit, val; 4013 char buf[32]; 4014 const char *dname; 4015 4016 pathid = CAM_XPT_PATH_ID; 4017 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4018 i = 0; 4019 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4020 if (strcmp(dname, "scbus")) { 4021 /* Avoid a bit of foot shooting. */ 4022 continue; 4023 } 4024 if (dunit < 0) /* unwired?! */ 4025 continue; 4026 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4027 if (sim_bus == val) { 4028 pathid = dunit; 4029 break; 4030 } 4031 } else if (sim_bus == 0) { 4032 /* Unspecified matches bus 0 */ 4033 pathid = dunit; 4034 break; 4035 } else { 4036 printf("Ambiguous scbus configuration for %s%d " 4037 "bus %d, cannot wire down. The kernel " 4038 "config entry for scbus%d should " 4039 "specify a controller bus.\n" 4040 "Scbus will be assigned dynamically.\n", 4041 sim_name, sim_unit, sim_bus, dunit); 4042 break; 4043 } 4044 } 4045 4046 if (pathid == CAM_XPT_PATH_ID) 4047 pathid = xptnextfreepathid(); 4048 return (pathid); 4049 } 4050 4051 void 4052 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4053 { 4054 struct cam_eb *bus; 4055 struct cam_et *target, *next_target; 4056 struct cam_ed *device, *next_device; 4057 4058 mtx_assert(path->bus->sim->mtx, MA_OWNED); 4059 4060 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n")); 4061 4062 /* 4063 * Most async events come from a CAM interrupt context. In 4064 * a few cases, the error recovery code at the peripheral layer, 4065 * which may run from our SWI or a process context, may signal 4066 * deferred events with a call to xpt_async. 
4067 */ 4068 4069 bus = path->bus; 4070 4071 if (async_code == AC_BUS_RESET) { 4072 /* Update our notion of when the last reset occurred */ 4073 microtime(&bus->last_reset); 4074 } 4075 4076 for (target = TAILQ_FIRST(&bus->et_entries); 4077 target != NULL; 4078 target = next_target) { 4079 4080 next_target = TAILQ_NEXT(target, links); 4081 4082 if (path->target != target 4083 && path->target->target_id != CAM_TARGET_WILDCARD 4084 && target->target_id != CAM_TARGET_WILDCARD) 4085 continue; 4086 4087 if (async_code == AC_SENT_BDR) { 4088 /* Update our notion of when the last reset occurred */ 4089 microtime(&path->target->last_reset); 4090 } 4091 4092 for (device = TAILQ_FIRST(&target->ed_entries); 4093 device != NULL; 4094 device = next_device) { 4095 4096 next_device = TAILQ_NEXT(device, links); 4097 4098 if (path->device != device 4099 && path->device->lun_id != CAM_LUN_WILDCARD 4100 && device->lun_id != CAM_LUN_WILDCARD) 4101 continue; 4102 /* 4103 * The async callback could free the device. 4104 * If it is a broadcast async, it doesn't hold 4105 * device reference, so take our own reference. 4106 */ 4107 xpt_acquire_device(device); 4108 (*(bus->xport->async))(async_code, bus, 4109 target, device, 4110 async_arg); 4111 4112 xpt_async_bcast(&device->asyncs, async_code, 4113 path, async_arg); 4114 xpt_release_device(device); 4115 } 4116 } 4117 4118 /* 4119 * If this wasn't a fully wildcarded async, tell all 4120 * clients that want all async events. 4121 */ 4122 if (bus != xpt_periph->path->bus) 4123 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code, 4124 path, async_arg); 4125 } 4126 4127 static void 4128 xpt_async_bcast(struct async_list *async_head, 4129 u_int32_t async_code, 4130 struct cam_path *path, void *async_arg) 4131 { 4132 struct async_node *cur_entry; 4133 4134 cur_entry = SLIST_FIRST(async_head); 4135 while (cur_entry != NULL) { 4136 struct async_node *next_entry; 4137 /* 4138 * Grab the next list entry before we call the current 4139 * entry's callback. This is because the callback function 4140 * can delete its async callback entry. 4141 */ 4142 next_entry = SLIST_NEXT(cur_entry, links); 4143 if ((cur_entry->event_enable & async_code) != 0) 4144 cur_entry->callback(cur_entry->callback_arg, 4145 async_code, path, 4146 async_arg); 4147 cur_entry = next_entry; 4148 } 4149 } 4150 4151 static void 4152 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4153 struct cam_et *target, struct cam_ed *device, 4154 void *async_arg) 4155 { 4156 printf("%s called\n", __func__); 4157 } 4158 4159 u_int32_t 4160 xpt_freeze_devq_rl(struct cam_path *path, cam_rl rl, u_int count) 4161 { 4162 struct cam_ed *dev = path->device; 4163 4164 mtx_assert(path->bus->sim->mtx, MA_OWNED); 4165 dev->sim->devq->alloc_openings += 4166 cam_ccbq_freeze(&dev->ccbq, rl, count); 4167 /* Remove frozen device from allocq. */ 4168 if (device_is_alloc_queued(dev) && 4169 cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL( 4170 CAMQ_GET_PRIO(&dev->drvq)))) { 4171 camq_remove(&dev->sim->devq->alloc_queue, 4172 dev->alloc_ccb_entry.pinfo.index); 4173 } 4174 /* Remove frozen device from sendq. 
*/ 4175 if (device_is_send_queued(dev) && 4176 cam_ccbq_frozen_top(&dev->ccbq)) { 4177 camq_remove(&dev->sim->devq->send_queue, 4178 dev->send_ccb_entry.pinfo.index); 4179 } 4180 return (dev->ccbq.queue.qfrozen_cnt[rl]); 4181 } 4182 4183 u_int32_t 4184 xpt_freeze_devq(struct cam_path *path, u_int count) 4185 { 4186 4187 return (xpt_freeze_devq_rl(path, 0, count)); 4188 } 4189 4190 u_int32_t 4191 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4192 { 4193 4194 mtx_assert(sim->mtx, MA_OWNED); 4195 sim->devq->send_queue.qfrozen_cnt[0] += count; 4196 return (sim->devq->send_queue.qfrozen_cnt[0]); 4197 } 4198 4199 static void 4200 xpt_release_devq_timeout(void *arg) 4201 { 4202 struct cam_ed *device; 4203 4204 device = (struct cam_ed *)arg; 4205 4206 xpt_release_devq_device(device, /*rl*/0, /*count*/1, /*run_queue*/TRUE); 4207 } 4208 4209 void 4210 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4211 { 4212 mtx_assert(path->bus->sim->mtx, MA_OWNED); 4213 4214 xpt_release_devq_device(path->device, /*rl*/0, count, run_queue); 4215 } 4216 4217 void 4218 xpt_release_devq_rl(struct cam_path *path, cam_rl rl, u_int count, int run_queue) 4219 { 4220 mtx_assert(path->bus->sim->mtx, MA_OWNED); 4221 4222 xpt_release_devq_device(path->device, rl, count, run_queue); 4223 } 4224 4225 static void 4226 xpt_release_devq_device(struct cam_ed *dev, cam_rl rl, u_int count, int run_queue) 4227 { 4228 4229 if (count > dev->ccbq.queue.qfrozen_cnt[rl]) { 4230 #ifdef INVARIANTS 4231 printf("xpt_release_devq(%d): requested %u > present %u\n", 4232 rl, count, dev->ccbq.queue.qfrozen_cnt[rl]); 4233 #endif 4234 count = dev->ccbq.queue.qfrozen_cnt[rl]; 4235 } 4236 dev->sim->devq->alloc_openings -= 4237 cam_ccbq_release(&dev->ccbq, rl, count); 4238 if (cam_ccbq_frozen(&dev->ccbq, CAM_PRIORITY_TO_RL( 4239 CAMQ_GET_PRIO(&dev->drvq))) == 0) { 4240 if (xpt_schedule_dev_allocq(dev->target->bus, dev)) 4241 xpt_run_dev_allocq(dev->target->bus); 4242 } 4243 if (cam_ccbq_frozen_top(&dev->ccbq) == 0) { 4244 /* 4245 * No longer need to wait for a successful 4246 * command completion. 4247 */ 4248 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4249 /* 4250 * Remove any timeouts that might be scheduled 4251 * to release this queue. 4252 */ 4253 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4254 callout_stop(&dev->callout); 4255 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4256 } 4257 if (run_queue == 0) 4258 return; 4259 /* 4260 * Now that we are unfrozen schedule the 4261 * device so any pending transactions are 4262 * run. 4263 */ 4264 if (xpt_schedule_dev_sendq(dev->target->bus, dev)) 4265 xpt_run_dev_sendq(dev->target->bus); 4266 } 4267 } 4268 4269 void 4270 xpt_release_simq(struct cam_sim *sim, int run_queue) 4271 { 4272 struct camq *sendq; 4273 4274 mtx_assert(sim->mtx, MA_OWNED); 4275 sendq = &(sim->devq->send_queue); 4276 if (sendq->qfrozen_cnt[0] <= 0) { 4277 #ifdef INVARIANTS 4278 printf("xpt_release_simq: requested 1 > present %u\n", 4279 sendq->qfrozen_cnt[0]); 4280 #endif 4281 } else 4282 sendq->qfrozen_cnt[0]--; 4283 if (sendq->qfrozen_cnt[0] == 0) { 4284 /* 4285 * If there is a timeout scheduled to release this 4286 * sim queue, remove it. The queue frozen count is 4287 * already at 0. 4288 */ 4289 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4290 callout_stop(&sim->callout); 4291 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4292 } 4293 if (run_queue) { 4294 struct cam_eb *bus; 4295 4296 /* 4297 * Now that we are unfrozen run the send queue. 
4298 */ 4299 bus = xpt_find_bus(sim->path_id); 4300 xpt_run_dev_sendq(bus); 4301 xpt_release_bus(bus); 4302 } 4303 } 4304 } 4305 4306 /* 4307 * XXX Appears to be unused. 4308 */ 4309 static void 4310 xpt_release_simq_timeout(void *arg) 4311 { 4312 struct cam_sim *sim; 4313 4314 sim = (struct cam_sim *)arg; 4315 xpt_release_simq(sim, /* run_queue */ TRUE); 4316 } 4317 4318 void 4319 xpt_done(union ccb *done_ccb) 4320 { 4321 struct cam_sim *sim; 4322 int first; 4323 4324 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n")); 4325 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) { 4326 /* 4327 * For any of the "non-immediate" types of ccbs, queue up the 4328 * request for handling by our SWI handler. 4329 */ 4330 sim = done_ccb->ccb_h.path->bus->sim; 4331 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h, 4332 sim_links.tqe); 4333 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4334 if ((sim->flags & (CAM_SIM_ON_DONEQ | CAM_SIM_POLLED | 4335 CAM_SIM_BATCH)) == 0) { 4336 mtx_lock(&cam_simq_lock); 4337 first = TAILQ_EMPTY(&cam_simq); 4338 TAILQ_INSERT_TAIL(&cam_simq, sim, links); 4339 mtx_unlock(&cam_simq_lock); 4340 sim->flags |= CAM_SIM_ON_DONEQ; 4341 if (first) 4342 swi_sched(cambio_ih, 0); 4343 } 4344 } 4345 } 4346 4347 void 4348 xpt_batch_start(struct cam_sim *sim) 4349 { 4350 4351 KASSERT((sim->flags & CAM_SIM_BATCH) == 0, ("Batch flag already set")); 4352 sim->flags |= CAM_SIM_BATCH; 4353 } 4354 4355 void 4356 xpt_batch_done(struct cam_sim *sim) 4357 { 4358 4359 KASSERT((sim->flags & CAM_SIM_BATCH) != 0, ("Batch flag was not set")); 4360 sim->flags &= ~CAM_SIM_BATCH; 4361 if (!TAILQ_EMPTY(&sim->sim_doneq) && 4362 (sim->flags & CAM_SIM_ON_DONEQ) == 0) 4363 camisr_runqueue(&sim->sim_doneq); 4364 } 4365 4366 union ccb * 4367 xpt_alloc_ccb() 4368 { 4369 union ccb *new_ccb; 4370 4371 new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_WAITOK); 4372 return (new_ccb); 4373 } 4374 4375 union ccb * 4376 xpt_alloc_ccb_nowait() 4377 { 4378 union ccb *new_ccb; 4379 4380 new_ccb = malloc(sizeof(*new_ccb), M_CAMXPT, M_ZERO|M_NOWAIT); 4381 return (new_ccb); 4382 } 4383 4384 void 4385 xpt_free_ccb(union ccb *free_ccb) 4386 { 4387 free(free_ccb, M_CAMXPT); 4388 } 4389 4390 4391 4392 /* Private XPT functions */ 4393 4394 /* 4395 * Get a CAM control block for the caller. Charge the structure to the device 4396 * referenced by the path. If this device has no 'credits' then the 4397 * device already has the maximum number of outstanding operations under way 4398 * and we return NULL. If we don't have sufficient resources to allocate more 4399 * ccbs, we also return NULL.
4400 */ 4401 static union ccb * 4402 xpt_get_ccb(struct cam_ed *device) 4403 { 4404 union ccb *new_ccb; 4405 struct cam_sim *sim; 4406 4407 sim = device->sim; 4408 if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) { 4409 new_ccb = xpt_alloc_ccb_nowait(); 4410 if (new_ccb == NULL) { 4411 return (NULL); 4412 } 4413 if ((sim->flags & CAM_SIM_MPSAFE) == 0) 4414 callout_handle_init(&new_ccb->ccb_h.timeout_ch); 4415 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h, 4416 xpt_links.sle); 4417 sim->ccb_count++; 4418 } 4419 cam_ccbq_take_opening(&device->ccbq); 4420 SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle); 4421 return (new_ccb); 4422 } 4423 4424 static void 4425 xpt_release_bus(struct cam_eb *bus) 4426 { 4427 4428 mtx_lock(&xsoftc.xpt_topo_lock); 4429 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4430 if ((--bus->refcount == 0) 4431 && (TAILQ_FIRST(&bus->et_entries) == NULL)) { 4432 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4433 xsoftc.bus_generation++; 4434 mtx_unlock(&xsoftc.xpt_topo_lock); 4435 cam_sim_release(bus->sim); 4436 free(bus, M_CAMXPT); 4437 } else 4438 mtx_unlock(&xsoftc.xpt_topo_lock); 4439 } 4440 4441 static struct cam_et * 4442 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4443 { 4444 struct cam_et *target; 4445 4446 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4447 M_NOWAIT|M_ZERO); 4448 if (target != NULL) { 4449 struct cam_et *cur_target; 4450 4451 TAILQ_INIT(&target->ed_entries); 4452 target->bus = bus; 4453 target->target_id = target_id; 4454 target->refcount = 1; 4455 target->generation = 0; 4456 target->luns = NULL; 4457 timevalclear(&target->last_reset); 4458 /* 4459 * Hold a reference to our parent bus so it 4460 * will not go away before we do. 4461 */ 4462 mtx_lock(&xsoftc.xpt_topo_lock); 4463 bus->refcount++; 4464 mtx_unlock(&xsoftc.xpt_topo_lock); 4465 4466 /* Insertion sort into our bus's target list */ 4467 cur_target = TAILQ_FIRST(&bus->et_entries); 4468 while (cur_target != NULL && cur_target->target_id < target_id) 4469 cur_target = TAILQ_NEXT(cur_target, links); 4470 4471 if (cur_target != NULL) { 4472 TAILQ_INSERT_BEFORE(cur_target, target, links); 4473 } else { 4474 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4475 } 4476 bus->generation++; 4477 } 4478 return (target); 4479 } 4480 4481 static void 4482 xpt_release_target(struct cam_et *target) 4483 { 4484 4485 if (target->refcount == 1) { 4486 if (TAILQ_FIRST(&target->ed_entries) == NULL) { 4487 TAILQ_REMOVE(&target->bus->et_entries, target, links); 4488 target->bus->generation++; 4489 xpt_release_bus(target->bus); 4490 if (target->luns) 4491 free(target->luns, M_CAMXPT); 4492 free(target, M_CAMXPT); 4493 } 4494 } else 4495 target->refcount--; 4496 } 4497 4498 static struct cam_ed * 4499 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4500 lun_id_t lun_id) 4501 { 4502 struct cam_ed *device, *cur_device; 4503 4504 device = xpt_alloc_device(bus, target, lun_id); 4505 if (device == NULL) 4506 return (NULL); 4507 4508 device->mintags = 1; 4509 device->maxtags = 1; 4510 bus->sim->max_ccbs += device->ccbq.devq_openings; 4511 cur_device = TAILQ_FIRST(&target->ed_entries); 4512 while (cur_device != NULL && cur_device->lun_id < lun_id) 4513 cur_device = TAILQ_NEXT(cur_device, links); 4514 if (cur_device != NULL) { 4515 TAILQ_INSERT_BEFORE(cur_device, device, links); 4516 } else { 4517 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4518 } 4519 target->generation++; 4520 4521 return (device); 4522 } 4523 4524 struct 
cam_ed * 4525 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4526 { 4527 struct cam_ed *device; 4528 struct cam_devq *devq; 4529 cam_status status; 4530 4531 /* Make space for us in the device queue on our bus */ 4532 devq = bus->sim->devq; 4533 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1); 4534 4535 if (status != CAM_REQ_CMP) { 4536 device = NULL; 4537 } else { 4538 device = (struct cam_ed *)malloc(sizeof(*device), 4539 M_CAMXPT, M_NOWAIT|M_ZERO); 4540 } 4541 4542 if (device != NULL) { 4543 cam_init_pinfo(&device->alloc_ccb_entry.pinfo); 4544 device->alloc_ccb_entry.device = device; 4545 cam_init_pinfo(&device->send_ccb_entry.pinfo); 4546 device->send_ccb_entry.device = device; 4547 device->target = target; 4548 device->lun_id = lun_id; 4549 device->sim = bus->sim; 4550 /* Initialize our queues */ 4551 if (camq_init(&device->drvq, 0) != 0) { 4552 free(device, M_CAMXPT); 4553 return (NULL); 4554 } 4555 if (cam_ccbq_init(&device->ccbq, 4556 bus->sim->max_dev_openings) != 0) { 4557 camq_fini(&device->drvq); 4558 free(device, M_CAMXPT); 4559 return (NULL); 4560 } 4561 SLIST_INIT(&device->asyncs); 4562 SLIST_INIT(&device->periphs); 4563 device->generation = 0; 4564 device->owner = NULL; 4565 device->flags = CAM_DEV_UNCONFIGURED; 4566 device->tag_delay_count = 0; 4567 device->tag_saved_openings = 0; 4568 device->refcount = 1; 4569 callout_init_mtx(&device->callout, bus->sim->mtx, 0); 4570 4571 /* 4572 * Hold a reference to our parent target so it 4573 * will not go away before we do. 4574 */ 4575 target->refcount++; 4576 4577 } 4578 return (device); 4579 } 4580 4581 void 4582 xpt_acquire_device(struct cam_ed *device) 4583 { 4584 4585 device->refcount++; 4586 } 4587 4588 void 4589 xpt_release_device(struct cam_ed *device) 4590 { 4591 4592 if (device->refcount == 1) { 4593 struct cam_devq *devq; 4594 4595 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX 4596 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX) 4597 panic("Removing device while still queued for ccbs"); 4598 4599 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4600 callout_stop(&device->callout); 4601 4602 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4603 device->target->generation++; 4604 device->target->bus->sim->max_ccbs -= device->ccbq.devq_openings; 4605 /* Release our slot in the devq */ 4606 devq = device->target->bus->sim->devq; 4607 cam_devq_resize(devq, devq->alloc_queue.array_size - 1); 4608 camq_fini(&device->drvq); 4609 cam_ccbq_fini(&device->ccbq); 4610 /* 4611 * Free allocated memory. free(9) does nothing if the 4612 * supplied pointer is NULL, so it is safe to call without 4613 * checking. 
4614 */ 4615 free(device->supported_vpds, M_CAMXPT); 4616 free(device->device_id, M_CAMXPT); 4617 free(device->physpath, M_CAMXPT); 4618 free(device->rcap_buf, M_CAMXPT); 4619 free(device->serial_num, M_CAMXPT); 4620 4621 xpt_release_target(device->target); 4622 free(device, M_CAMXPT); 4623 } else 4624 device->refcount--; 4625 } 4626 4627 u_int32_t 4628 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4629 { 4630 int diff; 4631 int result; 4632 struct cam_ed *dev; 4633 4634 dev = path->device; 4635 4636 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings); 4637 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4638 if (result == CAM_REQ_CMP && (diff < 0)) { 4639 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED; 4640 } 4641 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4642 || (dev->inq_flags & SID_CmdQue) != 0) 4643 dev->tag_saved_openings = newopenings; 4644 /* Adjust the global limit */ 4645 dev->sim->max_ccbs += diff; 4646 return (result); 4647 } 4648 4649 static struct cam_eb * 4650 xpt_find_bus(path_id_t path_id) 4651 { 4652 struct cam_eb *bus; 4653 4654 mtx_lock(&xsoftc.xpt_topo_lock); 4655 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4656 bus != NULL; 4657 bus = TAILQ_NEXT(bus, links)) { 4658 if (bus->path_id == path_id) { 4659 bus->refcount++; 4660 break; 4661 } 4662 } 4663 mtx_unlock(&xsoftc.xpt_topo_lock); 4664 return (bus); 4665 } 4666 4667 static struct cam_et * 4668 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4669 { 4670 struct cam_et *target; 4671 4672 for (target = TAILQ_FIRST(&bus->et_entries); 4673 target != NULL; 4674 target = TAILQ_NEXT(target, links)) { 4675 if (target->target_id == target_id) { 4676 target->refcount++; 4677 break; 4678 } 4679 } 4680 return (target); 4681 } 4682 4683 static struct cam_ed * 4684 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4685 { 4686 struct cam_ed *device; 4687 4688 for (device = TAILQ_FIRST(&target->ed_entries); 4689 device != NULL; 4690 device = TAILQ_NEXT(device, links)) { 4691 if (device->lun_id == lun_id) { 4692 device->refcount++; 4693 break; 4694 } 4695 } 4696 return (device); 4697 } 4698 4699 void 4700 xpt_start_tags(struct cam_path *path) 4701 { 4702 struct ccb_relsim crs; 4703 struct cam_ed *device; 4704 struct cam_sim *sim; 4705 int newopenings; 4706 4707 device = path->device; 4708 sim = path->bus->sim; 4709 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4710 xpt_freeze_devq(path, /*count*/1); 4711 device->inq_flags |= SID_CmdQue; 4712 if (device->tag_saved_openings != 0) 4713 newopenings = device->tag_saved_openings; 4714 else 4715 newopenings = min(device->maxtags, 4716 sim->max_tagged_dev_openings); 4717 xpt_dev_ccbq_resize(path, newopenings); 4718 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4719 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4720 crs.ccb_h.func_code = XPT_REL_SIMQ; 4721 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4722 crs.openings 4723 = crs.release_timeout 4724 = crs.qfrozen_cnt 4725 = 0; 4726 xpt_action((union ccb *)&crs); 4727 } 4728 4729 void 4730 xpt_stop_tags(struct cam_path *path) 4731 { 4732 struct ccb_relsim crs; 4733 struct cam_ed *device; 4734 struct cam_sim *sim; 4735 4736 device = path->device; 4737 sim = path->bus->sim; 4738 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4739 device->tag_delay_count = 0; 4740 xpt_freeze_devq(path, /*count*/1); 4741 device->inq_flags &= ~SID_CmdQue; 4742 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4743 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4744 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 
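/* Use an immediate XPT_REL_SIMQ CCB so that the device queue frozen above is released again once the commands already outstanding have drained (RELSIM_RELEASE_AFTER_QEMPTY). */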
4745 crs.ccb_h.func_code = XPT_REL_SIMQ; 4746 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4747 crs.openings 4748 = crs.release_timeout 4749 = crs.qfrozen_cnt 4750 = 0; 4751 xpt_action((union ccb *)&crs); 4752 } 4753 4754 static void 4755 xpt_boot_delay(void *arg) 4756 { 4757 4758 xpt_release_boot(); 4759 } 4760 4761 static void 4762 xpt_config(void *arg) 4763 { 4764 /* 4765 * Now that interrupts are enabled, go find our devices. 4766 */ 4767 4768 #ifdef CAMDEBUG 4769 /* Set up debugging flags and path */ 4770 #ifdef CAM_DEBUG_BUS 4771 if (cam_dflags != CAM_DEBUG_NONE) { 4772 /* 4773 * Locking is specifically omitted here. No SIMs have 4774 * registered yet, so xpt_create_path will only be searching 4775 * empty lists of targets and devices. 4776 */ 4777 if (xpt_create_path(&cam_dpath, xpt_periph, 4778 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 4779 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 4780 printf("xpt_config: xpt_create_path() failed for debug" 4781 " target %d:%d:%d, debugging disabled\n", 4782 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 4783 cam_dflags = CAM_DEBUG_NONE; 4784 } 4785 } else 4786 cam_dpath = NULL; 4787 #else /* !CAM_DEBUG_BUS */ 4788 cam_dpath = NULL; 4789 #endif /* CAM_DEBUG_BUS */ 4790 #endif /* CAMDEBUG */ 4791 4792 periphdriver_init(1); 4793 xpt_hold_boot(); 4794 callout_init(&xsoftc.boot_callout, 1); 4795 callout_reset(&xsoftc.boot_callout, hz * xsoftc.boot_delay / 1000, 4796 xpt_boot_delay, NULL); 4797 /* Fire up rescan thread. */ 4798 if (kproc_create(xpt_scanner_thread, NULL, NULL, 0, 0, "xpt_thrd")) { 4799 printf("xpt_config: failed to create rescan thread.\n"); 4800 } 4801 } 4802 4803 void 4804 xpt_hold_boot(void) 4805 { 4806 xpt_lock_buses(); 4807 xsoftc.buses_to_config++; 4808 xpt_unlock_buses(); 4809 } 4810 4811 void 4812 xpt_release_boot(void) 4813 { 4814 xpt_lock_buses(); 4815 xsoftc.buses_to_config--; 4816 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 4817 struct xpt_task *task; 4818 4819 xsoftc.buses_config_done = 1; 4820 xpt_unlock_buses(); 4821 /* Call manually because we don't have any busses */ 4822 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 4823 if (task != NULL) { 4824 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 4825 taskqueue_enqueue(taskqueue_thread, &task->task); 4826 } 4827 } else 4828 xpt_unlock_buses(); 4829 } 4830 4831 /* 4832 * If the given device only has one peripheral attached to it, and if that 4833 * peripheral is the passthrough driver, announce it. This ensures that the 4834 * user sees some sort of announcement for every peripheral in their system. 4835 */ 4836 static int 4837 xptpassannouncefunc(struct cam_ed *device, void *arg) 4838 { 4839 struct cam_periph *periph; 4840 int i; 4841 4842 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 4843 periph = SLIST_NEXT(periph, periph_links), i++); 4844 4845 periph = SLIST_FIRST(&device->periphs); 4846 if ((i == 1) 4847 && (strncmp(periph->periph_name, "pass", 4) == 0)) 4848 xpt_announce_periph(periph, NULL); 4849 4850 return(1); 4851 } 4852 4853 static void 4854 xpt_finishconfig_task(void *context, int pending) 4855 { 4856 4857 periphdriver_init(2); 4858 /* 4859 * Check for devices with no "standard" peripheral driver 4860 * attached. For any devices like that, announce the 4861 * passthrough driver so the user will see something. 4862 */ 4863 if (!bootverbose) 4864 xpt_for_all_devices(xptpassannouncefunc, NULL); 4865 4866 /* Release our hook so that the boot can continue.
*/ 4867 config_intrhook_disestablish(xsoftc.xpt_config_hook); 4868 free(xsoftc.xpt_config_hook, M_CAMXPT); 4869 xsoftc.xpt_config_hook = NULL; 4870 4871 free(context, M_CAMXPT); 4872 } 4873 4874 cam_status 4875 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 4876 struct cam_path *path) 4877 { 4878 struct ccb_setasync csa; 4879 cam_status status; 4880 int xptpath = 0; 4881 4882 if (path == NULL) { 4883 mtx_lock(&xsoftc.xpt_lock); 4884 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 4885 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 4886 if (status != CAM_REQ_CMP) { 4887 mtx_unlock(&xsoftc.xpt_lock); 4888 return (status); 4889 } 4890 xptpath = 1; 4891 } 4892 4893 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 4894 csa.ccb_h.func_code = XPT_SASYNC_CB; 4895 csa.event_enable = event; 4896 csa.callback = cbfunc; 4897 csa.callback_arg = cbarg; 4898 xpt_action((union ccb *)&csa); 4899 status = csa.ccb_h.status; 4900 4901 if (xptpath) { 4902 xpt_free_path(path); 4903 mtx_unlock(&xsoftc.xpt_lock); 4904 } 4905 4906 if ((status == CAM_REQ_CMP) && 4907 (csa.event_enable & AC_FOUND_DEVICE)) { 4908 /* 4909 * Get this peripheral up to date with all 4910 * the currently existing devices. 4911 */ 4912 xpt_for_all_devices(xptsetasyncfunc, &csa); 4913 } 4914 if ((status == CAM_REQ_CMP) && 4915 (csa.event_enable & AC_PATH_REGISTERED)) { 4916 /* 4917 * Get this peripheral up to date with all 4918 * the currently existing busses. 4919 */ 4920 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 4921 } 4922 4923 return (status); 4924 } 4925 4926 static void 4927 xptaction(struct cam_sim *sim, union ccb *work_ccb) 4928 { 4929 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 4930 4931 switch (work_ccb->ccb_h.func_code) { 4932 /* Common cases first */ 4933 case XPT_PATH_INQ: /* Path routing inquiry */ 4934 { 4935 struct ccb_pathinq *cpi; 4936 4937 cpi = &work_ccb->cpi; 4938 cpi->version_num = 1; /* XXX??? */ 4939 cpi->hba_inquiry = 0; 4940 cpi->target_sprt = 0; 4941 cpi->hba_misc = 0; 4942 cpi->hba_eng_cnt = 0; 4943 cpi->max_target = 0; 4944 cpi->max_lun = 0; 4945 cpi->initiator_id = 0; 4946 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 4947 strncpy(cpi->hba_vid, "", HBA_IDLEN); 4948 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 4949 cpi->unit_number = sim->unit_number; 4950 cpi->bus_id = sim->bus_id; 4951 cpi->base_transfer_speed = 0; 4952 cpi->protocol = PROTO_UNSPECIFIED; 4953 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 4954 cpi->transport = XPORT_UNSPECIFIED; 4955 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 4956 cpi->ccb_h.status = CAM_REQ_CMP; 4957 xpt_done(work_ccb); 4958 break; 4959 } 4960 default: 4961 work_ccb->ccb_h.status = CAM_REQ_INVALID; 4962 xpt_done(work_ccb); 4963 break; 4964 } 4965 } 4966 4967 /* 4968 * The xpt as a "controller" has no interrupt sources, so polling 4969 * is a no-op. 
4970 */ 4971 static void 4972 xptpoll(struct cam_sim *sim) 4973 { 4974 } 4975 4976 void 4977 xpt_lock_buses(void) 4978 { 4979 mtx_lock(&xsoftc.xpt_topo_lock); 4980 } 4981 4982 void 4983 xpt_unlock_buses(void) 4984 { 4985 mtx_unlock(&xsoftc.xpt_topo_lock); 4986 } 4987 4988 static void 4989 camisr(void *dummy) 4990 { 4991 cam_simq_t queue; 4992 struct cam_sim *sim; 4993 4994 mtx_lock(&cam_simq_lock); 4995 TAILQ_INIT(&queue); 4996 while (!TAILQ_EMPTY(&cam_simq)) { 4997 TAILQ_CONCAT(&queue, &cam_simq, links); 4998 mtx_unlock(&cam_simq_lock); 4999 5000 while ((sim = TAILQ_FIRST(&queue)) != NULL) { 5001 TAILQ_REMOVE(&queue, sim, links); 5002 CAM_SIM_LOCK(sim); 5003 sim->flags &= ~CAM_SIM_ON_DONEQ; 5004 camisr_runqueue(&sim->sim_doneq); 5005 CAM_SIM_UNLOCK(sim); 5006 } 5007 mtx_lock(&cam_simq_lock); 5008 } 5009 mtx_unlock(&cam_simq_lock); 5010 } 5011 5012 static void 5013 camisr_runqueue(void *V_queue) 5014 { 5015 cam_isrq_t *queue = V_queue; 5016 struct ccb_hdr *ccb_h; 5017 5018 while ((ccb_h = TAILQ_FIRST(queue)) != NULL) { 5019 int runq; 5020 5021 TAILQ_REMOVE(queue, ccb_h, sim_links.tqe); 5022 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5023 5024 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE, 5025 ("camisr\n")); 5026 5027 runq = FALSE; 5028 5029 if (ccb_h->flags & CAM_HIGH_POWER) { 5030 struct highpowerlist *hphead; 5031 union ccb *send_ccb; 5032 5033 mtx_lock(&xsoftc.xpt_lock); 5034 hphead = &xsoftc.highpowerq; 5035 5036 send_ccb = (union ccb *)STAILQ_FIRST(hphead); 5037 5038 /* 5039 * Increment the count since this command is done. 5040 */ 5041 xsoftc.num_highpower++; 5042 5043 /* 5044 * Any high powered commands queued up? 5045 */ 5046 if (send_ccb != NULL) { 5047 5048 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe); 5049 mtx_unlock(&xsoftc.xpt_lock); 5050 5051 xpt_release_devq(send_ccb->ccb_h.path, 5052 /*count*/1, /*runqueue*/TRUE); 5053 } else 5054 mtx_unlock(&xsoftc.xpt_lock); 5055 } 5056 5057 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5058 struct cam_ed *dev; 5059 5060 dev = ccb_h->path->device; 5061 5062 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5063 ccb_h->path->bus->sim->devq->send_active--; 5064 ccb_h->path->bus->sim->devq->send_openings++; 5065 runq = TRUE; 5066 5067 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5068 && (dev->ccbq.dev_active == 0))) { 5069 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5070 xpt_release_devq(ccb_h->path, /*count*/1, 5071 /*run_queue*/FALSE); 5072 } 5073 5074 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5075 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5076 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5077 xpt_release_devq(ccb_h->path, /*count*/1, 5078 /*run_queue*/FALSE); 5079 } 5080 5081 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5082 && (--dev->tag_delay_count == 0)) 5083 xpt_start_tags(ccb_h->path); 5084 if (!device_is_send_queued(dev)) { 5085 (void)xpt_schedule_dev_sendq(ccb_h->path->bus, 5086 dev); 5087 } 5088 } 5089 5090 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5091 xpt_release_simq(ccb_h->path->bus->sim, 5092 /*run_queue*/TRUE); 5093 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5094 runq = FALSE; 5095 } 5096 5097 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5098 && (ccb_h->status & CAM_DEV_QFRZN)) { 5099 xpt_release_devq(ccb_h->path, /*count*/1, 5100 /*run_queue*/TRUE); 5101 ccb_h->status &= ~CAM_DEV_QFRZN; 5102 } else if (runq) { 5103 xpt_run_dev_sendq(ccb_h->path->bus); 5104 } 5105 5106 /* Call the peripheral driver's callback */ 5107 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5108 } 5109 } 5110
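/*
 * Illustrative sketch only, kept under "#if 0" so it is never compiled: how a
 * hypothetical SIM driver hands a completed request back to the XPT.  The
 * "xx_softc", "xx_intr" and "xx_next_completed_ccb" names are invented for
 * this example; the xpt_done() usage mirrors the completion path implemented
 * by xpt_done() and camisr_runqueue() above.
 */
#if 0
struct xx_softc {
	struct cam_sim	*sim;
	struct mtx	 lock;	/* Lock handed to cam_sim_alloc(). */
};

static void
xx_intr(void *arg)
{
	struct xx_softc *sc = arg;
	union ccb *ccb;

	mtx_lock(&sc->lock);
	while ((ccb = xx_next_completed_ccb(sc)) != NULL) {
		/* The request is no longer queued with the controller. */
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		ccb->ccb_h.status |= CAM_REQ_CMP;
		/*
		 * Hand the CCB back to the XPT.  xpt_done() places it on
		 * the SIM's done queue and schedules the CAM SWI; the
		 * peripheral's completion callback then runs from
		 * camisr_runqueue().
		 */
		xpt_done(ccb);
	}
	mtx_unlock(&sc->lock);
}
#endif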