1 /*- 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/bus.h> 35 #include <sys/systm.h> 36 #include <sys/types.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/time.h> 40 #include <sys/conf.h> 41 #include <sys/fcntl.h> 42 #include <sys/interrupt.h> 43 #include <sys/proc.h> 44 #include <sys/sbuf.h> 45 #include <sys/smp.h> 46 #include <sys/taskqueue.h> 47 48 #include <sys/lock.h> 49 #include <sys/mutex.h> 50 #include <sys/sysctl.h> 51 #include <sys/kthread.h> 52 53 #include <cam/cam.h> 54 #include <cam/cam_ccb.h> 55 #include <cam/cam_periph.h> 56 #include <cam/cam_queue.h> 57 #include <cam/cam_sim.h> 58 #include <cam/cam_xpt.h> 59 #include <cam/cam_xpt_sim.h> 60 #include <cam/cam_xpt_periph.h> 61 #include <cam/cam_xpt_internal.h> 62 #include <cam/cam_debug.h> 63 #include <cam/cam_compat.h> 64 65 #include <cam/scsi/scsi_all.h> 66 #include <cam/scsi/scsi_message.h> 67 #include <cam/scsi/scsi_pass.h> 68 69 #include <machine/md_var.h> /* geometry translation */ 70 #include <machine/stdarg.h> /* for xpt_print below */ 71 72 #include "opt_cam.h" 73 74 /* 75 * This is the maximum number of high powered commands (e.g. start unit) 76 * that can be outstanding at a particular time. 
77 */ 78 #ifndef CAM_MAX_HIGHPOWER 79 #define CAM_MAX_HIGHPOWER 4 80 #endif 81 82 /* Datastructures internal to the xpt layer */ 83 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); 84 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); 85 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); 86 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); 87 88 /* Object for defering XPT actions to a taskqueue */ 89 struct xpt_task { 90 struct task task; 91 void *data1; 92 uintptr_t data2; 93 }; 94 95 struct xpt_softc { 96 uint32_t xpt_generation; 97 98 /* number of high powered commands that can go through right now */ 99 struct mtx xpt_highpower_lock; 100 STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; 101 int num_highpower; 102 103 /* queue for handling async rescan requests. */ 104 TAILQ_HEAD(, ccb_hdr) ccb_scanq; 105 int buses_to_config; 106 int buses_config_done; 107 108 /* Registered busses */ 109 TAILQ_HEAD(,cam_eb) xpt_busses; 110 u_int bus_generation; 111 112 struct intr_config_hook *xpt_config_hook; 113 114 int boot_delay; 115 struct callout boot_callout; 116 117 struct mtx xpt_topo_lock; 118 struct mtx xpt_lock; 119 struct taskqueue *xpt_taskq; 120 }; 121 122 typedef enum { 123 DM_RET_COPY = 0x01, 124 DM_RET_FLAG_MASK = 0x0f, 125 DM_RET_NONE = 0x00, 126 DM_RET_STOP = 0x10, 127 DM_RET_DESCEND = 0x20, 128 DM_RET_ERROR = 0x30, 129 DM_RET_ACTION_MASK = 0xf0 130 } dev_match_ret; 131 132 typedef enum { 133 XPT_DEPTH_BUS, 134 XPT_DEPTH_TARGET, 135 XPT_DEPTH_DEVICE, 136 XPT_DEPTH_PERIPH 137 } xpt_traverse_depth; 138 139 struct xpt_traverse_config { 140 xpt_traverse_depth depth; 141 void *tr_func; 142 void *tr_arg; 143 }; 144 145 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); 146 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); 147 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); 148 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); 149 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); 150 151 /* Transport layer configuration information */ 152 static struct xpt_softc xsoftc; 153 154 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); 155 156 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, 157 &xsoftc.boot_delay, 0, "Bus registration wait time"); 158 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, 159 &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); 160 161 struct cam_doneq { 162 struct mtx_padalign cam_doneq_mtx; 163 STAILQ_HEAD(, ccb_hdr) cam_doneq; 164 int cam_doneq_sleep; 165 }; 166 167 static struct cam_doneq cam_doneqs[MAXCPU]; 168 static int cam_num_doneqs; 169 static struct proc *cam_proc; 170 171 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, 172 &cam_num_doneqs, 0, "Number of completion queues/threads"); 173 174 struct cam_periph *xpt_periph; 175 176 static periph_init_t xpt_periph_init; 177 178 static struct periph_driver xpt_driver = 179 { 180 xpt_periph_init, "xpt", 181 TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, 182 CAM_PERIPH_DRV_EARLY 183 }; 184 185 PERIPHDRIVER_DECLARE(xpt, xpt_driver); 186 187 static d_open_t xptopen; 188 static d_close_t xptclose; 189 static d_ioctl_t xptioctl; 190 static d_ioctl_t xptdoioctl; 191 192 static struct cdevsw xpt_cdevsw = { 193 .d_version = D_VERSION, 194 .d_flags = 0, 195 .d_open = xptopen, 196 .d_close = xptclose, 197 .d_ioctl = xptioctl, 198 .d_name = "xpt", 199 }; 200 201 /* Storage for debugging datastructures */ 202 struct cam_path *cam_dpath; 203 u_int32_t cam_dflags = 
CAM_DEBUG_FLAGS; 204 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, 205 &cam_dflags, 0, "Enabled debug flags"); 206 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; 207 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, 208 &cam_debug_delay, 0, "Delay in us after each debug message"); 209 210 /* Our boot-time initialization hook */ 211 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); 212 213 static moduledata_t cam_moduledata = { 214 "cam", 215 cam_module_event_handler, 216 NULL 217 }; 218 219 static int xpt_init(void *); 220 221 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 222 MODULE_VERSION(cam, 1); 223 224 225 static void xpt_async_bcast(struct async_list *async_head, 226 u_int32_t async_code, 227 struct cam_path *path, 228 void *async_arg); 229 static path_id_t xptnextfreepathid(void); 230 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); 231 static union ccb *xpt_get_ccb(struct cam_periph *periph); 232 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); 233 static void xpt_run_allocq(struct cam_periph *periph, int sleep); 234 static void xpt_run_allocq_task(void *context, int pending); 235 static void xpt_run_devq(struct cam_devq *devq); 236 static timeout_t xpt_release_devq_timeout; 237 static void xpt_release_simq_timeout(void *arg) __unused; 238 static void xpt_acquire_bus(struct cam_eb *bus); 239 static void xpt_release_bus(struct cam_eb *bus); 240 static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); 241 static int xpt_release_devq_device(struct cam_ed *dev, u_int count, 242 int run_queue); 243 static struct cam_et* 244 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); 245 static void xpt_acquire_target(struct cam_et *target); 246 static void xpt_release_target(struct cam_et *target); 247 static struct cam_eb* 248 xpt_find_bus(path_id_t path_id); 249 static struct cam_et* 250 xpt_find_target(struct cam_eb *bus, target_id_t target_id); 251 static struct cam_ed* 252 xpt_find_device(struct cam_et *target, lun_id_t lun_id); 253 static void xpt_config(void *arg); 254 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, 255 u_int32_t new_priority); 256 static xpt_devicefunc_t xptpassannouncefunc; 257 static void xptaction(struct cam_sim *sim, union ccb *work_ccb); 258 static void xptpoll(struct cam_sim *sim); 259 static void camisr_runqueue(void); 260 static void xpt_done_process(struct ccb_hdr *ccb_h); 261 static void xpt_done_td(void *); 262 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, 263 u_int num_patterns, struct cam_eb *bus); 264 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, 265 u_int num_patterns, 266 struct cam_ed *device); 267 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, 268 u_int num_patterns, 269 struct cam_periph *periph); 270 static xpt_busfunc_t xptedtbusfunc; 271 static xpt_targetfunc_t xptedttargetfunc; 272 static xpt_devicefunc_t xptedtdevicefunc; 273 static xpt_periphfunc_t xptedtperiphfunc; 274 static xpt_pdrvfunc_t xptplistpdrvfunc; 275 static xpt_periphfunc_t xptplistperiphfunc; 276 static int xptedtmatch(struct ccb_dev_match *cdm); 277 static int xptperiphlistmatch(struct ccb_dev_match *cdm); 278 static int xptbustraverse(struct cam_eb *start_bus, 279 xpt_busfunc_t *tr_func, void *arg); 280 static int xpttargettraverse(struct cam_eb *bus, 281 struct cam_et *start_target, 282 xpt_targetfunc_t *tr_func, void *arg); 283 static int 
xptdevicetraverse(struct cam_et *target, 284 struct cam_ed *start_device, 285 xpt_devicefunc_t *tr_func, void *arg); 286 static int xptperiphtraverse(struct cam_ed *device, 287 struct cam_periph *start_periph, 288 xpt_periphfunc_t *tr_func, void *arg); 289 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 290 xpt_pdrvfunc_t *tr_func, void *arg); 291 static int xptpdperiphtraverse(struct periph_driver **pdrv, 292 struct cam_periph *start_periph, 293 xpt_periphfunc_t *tr_func, 294 void *arg); 295 static xpt_busfunc_t xptdefbusfunc; 296 static xpt_targetfunc_t xptdeftargetfunc; 297 static xpt_devicefunc_t xptdefdevicefunc; 298 static xpt_periphfunc_t xptdefperiphfunc; 299 static void xpt_finishconfig_task(void *context, int pending); 300 static void xpt_dev_async_default(u_int32_t async_code, 301 struct cam_eb *bus, 302 struct cam_et *target, 303 struct cam_ed *device, 304 void *async_arg); 305 static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, 306 struct cam_et *target, 307 lun_id_t lun_id); 308 static xpt_devicefunc_t xptsetasyncfunc; 309 static xpt_busfunc_t xptsetasyncbusfunc; 310 static cam_status xptregister(struct cam_periph *periph, 311 void *arg); 312 static const char * xpt_action_name(uint32_t action); 313 static __inline int device_is_queued(struct cam_ed *device); 314 315 static __inline int 316 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) 317 { 318 int retval; 319 320 mtx_assert(&devq->send_mtx, MA_OWNED); 321 if ((dev->ccbq.queue.entries > 0) && 322 (dev->ccbq.dev_openings > 0) && 323 (dev->ccbq.queue.qfrozen_cnt == 0)) { 324 /* 325 * The priority of a device waiting for controller 326 * resources is that of the highest priority CCB 327 * enqueued. 328 */ 329 retval = 330 xpt_schedule_dev(&devq->send_queue, 331 &dev->devq_entry, 332 CAMQ_GET_PRIO(&dev->ccbq.queue)); 333 } else { 334 retval = 0; 335 } 336 return (retval); 337 } 338 339 static __inline int 340 device_is_queued(struct cam_ed *device) 341 { 342 return (device->devq_entry.index != CAM_UNQUEUED_INDEX); 343 } 344 345 static void 346 xpt_periph_init() 347 { 348 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 349 } 350 351 static int 352 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 353 { 354 355 /* 356 * Only allow read-write access. 357 */ 358 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 359 return(EPERM); 360 361 /* 362 * We don't allow nonblocking access. 363 */ 364 if ((flags & O_NONBLOCK) != 0) { 365 printf("%s: can't do nonblocking access\n", devtoname(dev)); 366 return(ENODEV); 367 } 368 369 return(0); 370 } 371 372 static int 373 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 374 { 375 376 return(0); 377 } 378 379 /* 380 * Don't automatically grab the xpt softc lock here even though this is going 381 * through the xpt device. The xpt device is really just a back door for 382 * accessing other devices and SIMs, so the right thing to do is to grab 383 * the appropriate SIM lock once the bus/SIM is located. 
384 */ 385 static int 386 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 387 { 388 int error; 389 390 if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { 391 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); 392 } 393 return (error); 394 } 395 396 static int 397 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 398 { 399 int error; 400 401 error = 0; 402 403 switch(cmd) { 404 /* 405 * For the transport layer CAMIOCOMMAND ioctl, we really only want 406 * to accept CCB types that don't quite make sense to send through a 407 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 408 * in the CAM spec. 409 */ 410 case CAMIOCOMMAND: { 411 union ccb *ccb; 412 union ccb *inccb; 413 struct cam_eb *bus; 414 415 inccb = (union ccb *)addr; 416 417 bus = xpt_find_bus(inccb->ccb_h.path_id); 418 if (bus == NULL) 419 return (EINVAL); 420 421 switch (inccb->ccb_h.func_code) { 422 case XPT_SCAN_BUS: 423 case XPT_RESET_BUS: 424 if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || 425 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 426 xpt_release_bus(bus); 427 return (EINVAL); 428 } 429 break; 430 case XPT_SCAN_TGT: 431 if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || 432 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 433 xpt_release_bus(bus); 434 return (EINVAL); 435 } 436 break; 437 default: 438 break; 439 } 440 441 switch(inccb->ccb_h.func_code) { 442 case XPT_SCAN_BUS: 443 case XPT_RESET_BUS: 444 case XPT_PATH_INQ: 445 case XPT_ENG_INQ: 446 case XPT_SCAN_LUN: 447 case XPT_SCAN_TGT: 448 449 ccb = xpt_alloc_ccb(); 450 451 /* 452 * Create a path using the bus, target, and lun the 453 * user passed in. 454 */ 455 if (xpt_create_path(&ccb->ccb_h.path, NULL, 456 inccb->ccb_h.path_id, 457 inccb->ccb_h.target_id, 458 inccb->ccb_h.target_lun) != 459 CAM_REQ_CMP){ 460 error = EINVAL; 461 xpt_free_ccb(ccb); 462 break; 463 } 464 /* Ensure all of our fields are correct */ 465 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 466 inccb->ccb_h.pinfo.priority); 467 xpt_merge_ccb(ccb, inccb); 468 xpt_path_lock(ccb->ccb_h.path); 469 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 470 xpt_path_unlock(ccb->ccb_h.path); 471 bcopy(ccb, inccb, sizeof(union ccb)); 472 xpt_free_path(ccb->ccb_h.path); 473 xpt_free_ccb(ccb); 474 break; 475 476 case XPT_DEBUG: { 477 union ccb ccb; 478 479 /* 480 * This is an immediate CCB, so it's okay to 481 * allocate it on the stack. 482 */ 483 484 /* 485 * Create a path using the bus, target, and lun the 486 * user passed in. 487 */ 488 if (xpt_create_path(&ccb.ccb_h.path, NULL, 489 inccb->ccb_h.path_id, 490 inccb->ccb_h.target_id, 491 inccb->ccb_h.target_lun) != 492 CAM_REQ_CMP){ 493 error = EINVAL; 494 break; 495 } 496 /* Ensure all of our fields are correct */ 497 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 498 inccb->ccb_h.pinfo.priority); 499 xpt_merge_ccb(&ccb, inccb); 500 xpt_action(&ccb); 501 bcopy(&ccb, inccb, sizeof(union ccb)); 502 xpt_free_path(ccb.ccb_h.path); 503 break; 504 505 } 506 case XPT_DEV_MATCH: { 507 struct cam_periph_map_info mapinfo; 508 struct cam_path *old_path; 509 510 /* 511 * We can't deal with physical addresses for this 512 * type of transaction. 513 */ 514 if ((inccb->ccb_h.flags & CAM_DATA_MASK) != 515 CAM_DATA_VADDR) { 516 error = EINVAL; 517 break; 518 } 519 520 /* 521 * Save this in case the caller had it set to 522 * something in particular. 523 */ 524 old_path = inccb->ccb_h.path; 525 526 /* 527 * We really don't need a path for the matching 528 * code. 
The path is needed because of the 529 * debugging statements in xpt_action(). They 530 * assume that the CCB has a valid path. 531 */ 532 inccb->ccb_h.path = xpt_periph->path; 533 534 bzero(&mapinfo, sizeof(mapinfo)); 535 536 /* 537 * Map the pattern and match buffers into kernel 538 * virtual address space. 539 */ 540 error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); 541 542 if (error) { 543 inccb->ccb_h.path = old_path; 544 break; 545 } 546 547 /* 548 * This is an immediate CCB, we can send it on directly. 549 */ 550 xpt_action(inccb); 551 552 /* 553 * Map the buffers back into user space. 554 */ 555 cam_periph_unmapmem(inccb, &mapinfo); 556 557 inccb->ccb_h.path = old_path; 558 559 error = 0; 560 break; 561 } 562 default: 563 error = ENOTSUP; 564 break; 565 } 566 xpt_release_bus(bus); 567 break; 568 } 569 /* 570 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 571 * with the periphal driver name and unit name filled in. The other 572 * fields don't really matter as input. The passthrough driver name 573 * ("pass"), and unit number are passed back in the ccb. The current 574 * device generation number, and the index into the device peripheral 575 * driver list, and the status are also passed back. Note that 576 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 577 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 578 * (or rather should be) impossible for the device peripheral driver 579 * list to change since we look at the whole thing in one pass, and 580 * we do it with lock protection. 581 * 582 */ 583 case CAMGETPASSTHRU: { 584 union ccb *ccb; 585 struct cam_periph *periph; 586 struct periph_driver **p_drv; 587 char *name; 588 u_int unit; 589 int base_periph_found; 590 591 ccb = (union ccb *)addr; 592 unit = ccb->cgdl.unit_number; 593 name = ccb->cgdl.periph_name; 594 base_periph_found = 0; 595 596 /* 597 * Sanity check -- make sure we don't get a null peripheral 598 * driver name. 599 */ 600 if (*ccb->cgdl.periph_name == '\0') { 601 error = EINVAL; 602 break; 603 } 604 605 /* Keep the list from changing while we traverse it */ 606 xpt_lock_buses(); 607 608 /* first find our driver in the list of drivers */ 609 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 610 if (strcmp((*p_drv)->driver_name, name) == 0) 611 break; 612 613 if (*p_drv == NULL) { 614 xpt_unlock_buses(); 615 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 616 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 617 *ccb->cgdl.periph_name = '\0'; 618 ccb->cgdl.unit_number = 0; 619 error = ENOENT; 620 break; 621 } 622 623 /* 624 * Run through every peripheral instance of this driver 625 * and check to see whether it matches the unit passed 626 * in by the user. If it does, get out of the loops and 627 * find the passthrough driver associated with that 628 * peripheral driver. 629 */ 630 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 631 periph = TAILQ_NEXT(periph, unit_links)) { 632 633 if (periph->unit_number == unit) 634 break; 635 } 636 /* 637 * If we found the peripheral driver that the user passed 638 * in, go through all of the peripheral drivers for that 639 * particular device and look for a passthrough driver. 640 */ 641 if (periph != NULL) { 642 struct cam_ed *device; 643 int i; 644 645 base_periph_found = 1; 646 device = periph->path->device; 647 for (i = 0, periph = SLIST_FIRST(&device->periphs); 648 periph != NULL; 649 periph = SLIST_NEXT(periph, periph_links), i++) { 650 /* 651 * Check to see whether we have a 652 * passthrough device or not. 
653 */ 654 if (strcmp(periph->periph_name, "pass") == 0) { 655 /* 656 * Fill in the getdevlist fields. 657 */ 658 strcpy(ccb->cgdl.periph_name, 659 periph->periph_name); 660 ccb->cgdl.unit_number = 661 periph->unit_number; 662 if (SLIST_NEXT(periph, periph_links)) 663 ccb->cgdl.status = 664 CAM_GDEVLIST_MORE_DEVS; 665 else 666 ccb->cgdl.status = 667 CAM_GDEVLIST_LAST_DEVICE; 668 ccb->cgdl.generation = 669 device->generation; 670 ccb->cgdl.index = i; 671 /* 672 * Fill in some CCB header fields 673 * that the user may want. 674 */ 675 ccb->ccb_h.path_id = 676 periph->path->bus->path_id; 677 ccb->ccb_h.target_id = 678 periph->path->target->target_id; 679 ccb->ccb_h.target_lun = 680 periph->path->device->lun_id; 681 ccb->ccb_h.status = CAM_REQ_CMP; 682 break; 683 } 684 } 685 } 686 687 /* 688 * If the periph is null here, one of two things has 689 * happened. The first possibility is that we couldn't 690 * find the unit number of the particular peripheral driver 691 * that the user is asking about. e.g. the user asks for 692 * the passthrough driver for "da11". We find the list of 693 * "da" peripherals all right, but there is no unit 11. 694 * The other possibility is that we went through the list 695 * of peripheral drivers attached to the device structure, 696 * but didn't find one with the name "pass". Either way, 697 * we return ENOENT, since we couldn't find something. 698 */ 699 if (periph == NULL) { 700 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 701 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 702 *ccb->cgdl.periph_name = '\0'; 703 ccb->cgdl.unit_number = 0; 704 error = ENOENT; 705 /* 706 * It is unfortunate that this is even necessary, 707 * but there are many, many clueless users out there. 708 * If this is true, the user is looking for the 709 * passthrough driver, but doesn't have one in his 710 * kernel. 711 */ 712 if (base_periph_found == 1) { 713 printf("xptioctl: pass driver is not in the " 714 "kernel\n"); 715 printf("xptioctl: put \"device pass\" in " 716 "your kernel config file\n"); 717 } 718 } 719 xpt_unlock_buses(); 720 break; 721 } 722 default: 723 error = ENOTTY; 724 break; 725 } 726 727 return(error); 728 } 729 730 static int 731 cam_module_event_handler(module_t mod, int what, void *arg) 732 { 733 int error; 734 735 switch (what) { 736 case MOD_LOAD: 737 if ((error = xpt_init(NULL)) != 0) 738 return (error); 739 break; 740 case MOD_UNLOAD: 741 return EBUSY; 742 default: 743 return EOPNOTSUPP; 744 } 745 746 return 0; 747 } 748 749 static void 750 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 751 { 752 753 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 754 xpt_free_path(done_ccb->ccb_h.path); 755 xpt_free_ccb(done_ccb); 756 } else { 757 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 758 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 759 } 760 xpt_release_boot(); 761 } 762 763 /* thread to handle bus rescans */ 764 static void 765 xpt_scanner_thread(void *dummy) 766 { 767 union ccb *ccb; 768 struct cam_path path; 769 770 xpt_lock_buses(); 771 for (;;) { 772 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 773 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 774 "-", 0); 775 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 776 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 777 xpt_unlock_buses(); 778 779 /* 780 * Since lock can be dropped inside and path freed 781 * by completion callback even before return here, 782 * take our own path copy for reference. 
783 */ 784 xpt_copy_path(&path, ccb->ccb_h.path); 785 xpt_path_lock(&path); 786 xpt_action(ccb); 787 xpt_path_unlock(&path); 788 xpt_release_path(&path); 789 790 xpt_lock_buses(); 791 } 792 } 793 } 794 795 void 796 xpt_rescan(union ccb *ccb) 797 { 798 struct ccb_hdr *hdr; 799 800 /* Prepare request */ 801 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 802 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 803 ccb->ccb_h.func_code = XPT_SCAN_BUS; 804 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 805 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 806 ccb->ccb_h.func_code = XPT_SCAN_TGT; 807 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 808 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 809 ccb->ccb_h.func_code = XPT_SCAN_LUN; 810 else { 811 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 812 xpt_free_path(ccb->ccb_h.path); 813 xpt_free_ccb(ccb); 814 return; 815 } 816 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 817 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 818 xpt_action_name(ccb->ccb_h.func_code))); 819 820 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 821 ccb->ccb_h.cbfcnp = xpt_rescan_done; 822 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 823 /* Don't make duplicate entries for the same paths. */ 824 xpt_lock_buses(); 825 if (ccb->ccb_h.ppriv_ptr1 == NULL) { 826 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { 827 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { 828 wakeup(&xsoftc.ccb_scanq); 829 xpt_unlock_buses(); 830 xpt_print(ccb->ccb_h.path, "rescan already queued\n"); 831 xpt_free_path(ccb->ccb_h.path); 832 xpt_free_ccb(ccb); 833 return; 834 } 835 } 836 } 837 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 838 xsoftc.buses_to_config++; 839 wakeup(&xsoftc.ccb_scanq); 840 xpt_unlock_buses(); 841 } 842 843 /* Functions accessed by the peripheral drivers */ 844 static int 845 xpt_init(void *dummy) 846 { 847 struct cam_sim *xpt_sim; 848 struct cam_path *path; 849 struct cam_devq *devq; 850 cam_status status; 851 int error, i; 852 853 TAILQ_INIT(&xsoftc.xpt_busses); 854 TAILQ_INIT(&xsoftc.ccb_scanq); 855 STAILQ_INIT(&xsoftc.highpowerq); 856 xsoftc.num_highpower = CAM_MAX_HIGHPOWER; 857 858 mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); 859 mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); 860 xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, 861 taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); 862 863 #ifdef CAM_BOOT_DELAY 864 /* 865 * Override this value at compile time to assist our users 866 * who don't use loader to boot a kernel. 867 */ 868 xsoftc.boot_delay = CAM_BOOT_DELAY; 869 #endif 870 /* 871 * The xpt layer is, itself, the equivalent of a SIM. 872 * Allow 16 ccbs in the ccb pool for it. This should 873 * give decent parallelism when we probe busses and 874 * perform other XPT functions. 
875 */ 876 devq = cam_simq_alloc(16); 877 xpt_sim = cam_sim_alloc(xptaction, 878 xptpoll, 879 "xpt", 880 /*softc*/NULL, 881 /*unit*/0, 882 /*mtx*/&xsoftc.xpt_lock, 883 /*max_dev_transactions*/0, 884 /*max_tagged_dev_transactions*/0, 885 devq); 886 if (xpt_sim == NULL) 887 return (ENOMEM); 888 889 mtx_lock(&xsoftc.xpt_lock); 890 if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { 891 mtx_unlock(&xsoftc.xpt_lock); 892 printf("xpt_init: xpt_bus_register failed with status %#x," 893 " failing attach\n", status); 894 return (EINVAL); 895 } 896 mtx_unlock(&xsoftc.xpt_lock); 897 898 /* 899 * Looking at the XPT from the SIM layer, the XPT is 900 * the equivalent of a peripheral driver. Allocate 901 * a peripheral driver entry for us. 902 */ 903 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 904 CAM_TARGET_WILDCARD, 905 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 906 printf("xpt_init: xpt_create_path failed with status %#x," 907 " failing attach\n", status); 908 return (EINVAL); 909 } 910 xpt_path_lock(path); 911 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 912 path, NULL, 0, xpt_sim); 913 xpt_path_unlock(path); 914 xpt_free_path(path); 915 916 if (cam_num_doneqs < 1) 917 cam_num_doneqs = 1 + mp_ncpus / 6; 918 else if (cam_num_doneqs > MAXCPU) 919 cam_num_doneqs = MAXCPU; 920 for (i = 0; i < cam_num_doneqs; i++) { 921 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, 922 MTX_DEF); 923 STAILQ_INIT(&cam_doneqs[i].cam_doneq); 924 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], 925 &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); 926 if (error != 0) { 927 cam_num_doneqs = i; 928 break; 929 } 930 } 931 if (cam_num_doneqs < 1) { 932 printf("xpt_init: Cannot init completion queues " 933 "- failing attach\n"); 934 return (ENOMEM); 935 } 936 /* 937 * Register a callback for when interrupts are enabled. 
938 */ 939 xsoftc.xpt_config_hook = 940 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 941 M_CAMXPT, M_NOWAIT | M_ZERO); 942 if (xsoftc.xpt_config_hook == NULL) { 943 printf("xpt_init: Cannot malloc config hook " 944 "- failing attach\n"); 945 return (ENOMEM); 946 } 947 xsoftc.xpt_config_hook->ich_func = xpt_config; 948 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { 949 free (xsoftc.xpt_config_hook, M_CAMXPT); 950 printf("xpt_init: config_intrhook_establish failed " 951 "- failing attach\n"); 952 } 953 954 return (0); 955 } 956 957 static cam_status 958 xptregister(struct cam_periph *periph, void *arg) 959 { 960 struct cam_sim *xpt_sim; 961 962 if (periph == NULL) { 963 printf("xptregister: periph was NULL!!\n"); 964 return(CAM_REQ_CMP_ERR); 965 } 966 967 xpt_sim = (struct cam_sim *)arg; 968 xpt_sim->softc = periph; 969 xpt_periph = periph; 970 periph->softc = NULL; 971 972 return(CAM_REQ_CMP); 973 } 974 975 int32_t 976 xpt_add_periph(struct cam_periph *periph) 977 { 978 struct cam_ed *device; 979 int32_t status; 980 981 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 982 device = periph->path->device; 983 status = CAM_REQ_CMP; 984 if (device != NULL) { 985 mtx_lock(&device->target->bus->eb_mtx); 986 device->generation++; 987 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 988 mtx_unlock(&device->target->bus->eb_mtx); 989 atomic_add_32(&xsoftc.xpt_generation, 1); 990 } 991 992 return (status); 993 } 994 995 void 996 xpt_remove_periph(struct cam_periph *periph) 997 { 998 struct cam_ed *device; 999 1000 device = periph->path->device; 1001 if (device != NULL) { 1002 mtx_lock(&device->target->bus->eb_mtx); 1003 device->generation++; 1004 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1005 mtx_unlock(&device->target->bus->eb_mtx); 1006 atomic_add_32(&xsoftc.xpt_generation, 1); 1007 } 1008 } 1009 1010 1011 void 1012 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1013 { 1014 struct cam_path *path = periph->path; 1015 1016 cam_periph_assert(periph, MA_OWNED); 1017 periph->flags |= CAM_PERIPH_ANNOUNCED; 1018 1019 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1020 periph->periph_name, periph->unit_number, 1021 path->bus->sim->sim_name, 1022 path->bus->sim->unit_number, 1023 path->bus->sim->bus_id, 1024 path->bus->path_id, 1025 path->target->target_id, 1026 (uintmax_t)path->device->lun_id); 1027 printf("%s%d: ", periph->periph_name, periph->unit_number); 1028 if (path->device->protocol == PROTO_SCSI) 1029 scsi_print_inquiry(&path->device->inq_data); 1030 else if (path->device->protocol == PROTO_ATA || 1031 path->device->protocol == PROTO_SATAPM) 1032 ata_print_ident(&path->device->ident_data); 1033 else if (path->device->protocol == PROTO_SEMB) 1034 semb_print_ident( 1035 (struct sep_identify_data *)&path->device->ident_data); 1036 else 1037 printf("Unknown protocol device\n"); 1038 if (path->device->serial_num_len > 0) { 1039 /* Don't wrap the screen - print only the first 60 chars */ 1040 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1041 periph->unit_number, path->device->serial_num); 1042 } 1043 /* Announce transport details. */ 1044 (*(path->bus->xport->announce))(periph); 1045 /* Announce command queueing. 
*/ 1046 if (path->device->inq_flags & SID_CmdQue 1047 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1048 printf("%s%d: Command Queueing enabled\n", 1049 periph->periph_name, periph->unit_number); 1050 } 1051 /* Announce caller's details if they've passed in. */ 1052 if (announce_string != NULL) 1053 printf("%s%d: %s\n", periph->periph_name, 1054 periph->unit_number, announce_string); 1055 } 1056 1057 void 1058 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) 1059 { 1060 if (quirks != 0) { 1061 printf("%s%d: quirks=0x%b\n", periph->periph_name, 1062 periph->unit_number, quirks, bit_string); 1063 } 1064 } 1065 1066 void 1067 xpt_denounce_periph(struct cam_periph *periph) 1068 { 1069 struct cam_path *path = periph->path; 1070 1071 cam_periph_assert(periph, MA_OWNED); 1072 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1073 periph->periph_name, periph->unit_number, 1074 path->bus->sim->sim_name, 1075 path->bus->sim->unit_number, 1076 path->bus->sim->bus_id, 1077 path->bus->path_id, 1078 path->target->target_id, 1079 (uintmax_t)path->device->lun_id); 1080 printf("%s%d: ", periph->periph_name, periph->unit_number); 1081 if (path->device->protocol == PROTO_SCSI) 1082 scsi_print_inquiry_short(&path->device->inq_data); 1083 else if (path->device->protocol == PROTO_ATA || 1084 path->device->protocol == PROTO_SATAPM) 1085 ata_print_ident_short(&path->device->ident_data); 1086 else if (path->device->protocol == PROTO_SEMB) 1087 semb_print_ident_short( 1088 (struct sep_identify_data *)&path->device->ident_data); 1089 else 1090 printf("Unknown protocol device"); 1091 if (path->device->serial_num_len > 0) 1092 printf(" s/n %.60s", path->device->serial_num); 1093 printf(" detached\n"); 1094 } 1095 1096 1097 int 1098 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) 1099 { 1100 int ret = -1, l; 1101 struct ccb_dev_advinfo cdai; 1102 struct scsi_vpd_id_descriptor *idd; 1103 1104 xpt_path_assert(path, MA_OWNED); 1105 1106 memset(&cdai, 0, sizeof(cdai)); 1107 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 1108 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 1109 cdai.bufsiz = len; 1110 1111 if (!strcmp(attr, "GEOM::ident")) 1112 cdai.buftype = CDAI_TYPE_SERIAL_NUM; 1113 else if (!strcmp(attr, "GEOM::physpath")) 1114 cdai.buftype = CDAI_TYPE_PHYS_PATH; 1115 else if (strcmp(attr, "GEOM::lunid") == 0 || 1116 strcmp(attr, "GEOM::lunname") == 0) { 1117 cdai.buftype = CDAI_TYPE_SCSI_DEVID; 1118 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; 1119 } else 1120 goto out; 1121 1122 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); 1123 if (cdai.buf == NULL) { 1124 ret = ENOMEM; 1125 goto out; 1126 } 1127 xpt_action((union ccb *)&cdai); /* can only be synchronous */ 1128 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 1129 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 1130 if (cdai.provsiz == 0) 1131 goto out; 1132 if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) { 1133 if (strcmp(attr, "GEOM::lunid") == 0) { 1134 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1135 cdai.provsiz, scsi_devid_is_lun_naa); 1136 if (idd == NULL) 1137 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1138 cdai.provsiz, scsi_devid_is_lun_eui64); 1139 } else 1140 idd = NULL; 1141 if (idd == NULL) 1142 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1143 cdai.provsiz, scsi_devid_is_lun_t10); 1144 if (idd == NULL) 1145 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1146 cdai.provsiz, scsi_devid_is_lun_name); 1147 if (idd == NULL) 1148 
goto out; 1149 ret = 0; 1150 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { 1151 if (idd->length < len) { 1152 for (l = 0; l < idd->length; l++) 1153 buf[l] = idd->identifier[l] ? 1154 idd->identifier[l] : ' '; 1155 buf[l] = 0; 1156 } else 1157 ret = EFAULT; 1158 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { 1159 l = strnlen(idd->identifier, idd->length); 1160 if (l < len) { 1161 bcopy(idd->identifier, buf, l); 1162 buf[l] = 0; 1163 } else 1164 ret = EFAULT; 1165 } else { 1166 if (idd->length * 2 < len) { 1167 for (l = 0; l < idd->length; l++) 1168 sprintf(buf + l * 2, "%02x", 1169 idd->identifier[l]); 1170 } else 1171 ret = EFAULT; 1172 } 1173 } else { 1174 ret = 0; 1175 if (strlcpy(buf, cdai.buf, len) >= len) 1176 ret = EFAULT; 1177 } 1178 1179 out: 1180 if (cdai.buf != NULL) 1181 free(cdai.buf, M_CAMXPT); 1182 return ret; 1183 } 1184 1185 static dev_match_ret 1186 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1187 struct cam_eb *bus) 1188 { 1189 dev_match_ret retval; 1190 u_int i; 1191 1192 retval = DM_RET_NONE; 1193 1194 /* 1195 * If we aren't given something to match against, that's an error. 1196 */ 1197 if (bus == NULL) 1198 return(DM_RET_ERROR); 1199 1200 /* 1201 * If there are no match entries, then this bus matches no 1202 * matter what. 1203 */ 1204 if ((patterns == NULL) || (num_patterns == 0)) 1205 return(DM_RET_DESCEND | DM_RET_COPY); 1206 1207 for (i = 0; i < num_patterns; i++) { 1208 struct bus_match_pattern *cur_pattern; 1209 1210 /* 1211 * If the pattern in question isn't for a bus node, we 1212 * aren't interested. However, we do indicate to the 1213 * calling routine that we should continue descending the 1214 * tree, since the user wants to match against lower-level 1215 * EDT elements. 1216 */ 1217 if (patterns[i].type != DEV_MATCH_BUS) { 1218 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1219 retval |= DM_RET_DESCEND; 1220 continue; 1221 } 1222 1223 cur_pattern = &patterns[i].pattern.bus_pattern; 1224 1225 /* 1226 * If they want to match any bus node, we give them any 1227 * device node. 1228 */ 1229 if (cur_pattern->flags == BUS_MATCH_ANY) { 1230 /* set the copy flag */ 1231 retval |= DM_RET_COPY; 1232 1233 /* 1234 * If we've already decided on an action, go ahead 1235 * and return. 1236 */ 1237 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1238 return(retval); 1239 } 1240 1241 /* 1242 * Not sure why someone would do this... 1243 */ 1244 if (cur_pattern->flags == BUS_MATCH_NONE) 1245 continue; 1246 1247 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1248 && (cur_pattern->path_id != bus->path_id)) 1249 continue; 1250 1251 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1252 && (cur_pattern->bus_id != bus->sim->bus_id)) 1253 continue; 1254 1255 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1256 && (cur_pattern->unit_number != bus->sim->unit_number)) 1257 continue; 1258 1259 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1260 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1261 DEV_IDLEN) != 0)) 1262 continue; 1263 1264 /* 1265 * If we get to this point, the user definitely wants 1266 * information on this bus. So tell the caller to copy the 1267 * data out. 1268 */ 1269 retval |= DM_RET_COPY; 1270 1271 /* 1272 * If the return action has been set to descend, then we 1273 * know that we've already seen a non-bus matching 1274 * expression, therefore we need to further descend the tree. 
1275 * This won't change by continuing around the loop, so we 1276 * go ahead and return. If we haven't seen a non-bus 1277 * matching expression, we keep going around the loop until 1278 * we exhaust the matching expressions. We'll set the stop 1279 * flag once we fall out of the loop. 1280 */ 1281 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1282 return(retval); 1283 } 1284 1285 /* 1286 * If the return action hasn't been set to descend yet, that means 1287 * we haven't seen anything other than bus matching patterns. So 1288 * tell the caller to stop descending the tree -- the user doesn't 1289 * want to match against lower level tree elements. 1290 */ 1291 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1292 retval |= DM_RET_STOP; 1293 1294 return(retval); 1295 } 1296 1297 static dev_match_ret 1298 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1299 struct cam_ed *device) 1300 { 1301 dev_match_ret retval; 1302 u_int i; 1303 1304 retval = DM_RET_NONE; 1305 1306 /* 1307 * If we aren't given something to match against, that's an error. 1308 */ 1309 if (device == NULL) 1310 return(DM_RET_ERROR); 1311 1312 /* 1313 * If there are no match entries, then this device matches no 1314 * matter what. 1315 */ 1316 if ((patterns == NULL) || (num_patterns == 0)) 1317 return(DM_RET_DESCEND | DM_RET_COPY); 1318 1319 for (i = 0; i < num_patterns; i++) { 1320 struct device_match_pattern *cur_pattern; 1321 struct scsi_vpd_device_id *device_id_page; 1322 1323 /* 1324 * If the pattern in question isn't for a device node, we 1325 * aren't interested. 1326 */ 1327 if (patterns[i].type != DEV_MATCH_DEVICE) { 1328 if ((patterns[i].type == DEV_MATCH_PERIPH) 1329 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1330 retval |= DM_RET_DESCEND; 1331 continue; 1332 } 1333 1334 cur_pattern = &patterns[i].pattern.device_pattern; 1335 1336 /* Error out if mutually exclusive options are specified. */ 1337 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1338 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1339 return(DM_RET_ERROR); 1340 1341 /* 1342 * If they want to match any device node, we give them any 1343 * device node. 1344 */ 1345 if (cur_pattern->flags == DEV_MATCH_ANY) 1346 goto copy_dev_node; 1347 1348 /* 1349 * Not sure why someone would do this... 
1350 */ 1351 if (cur_pattern->flags == DEV_MATCH_NONE) 1352 continue; 1353 1354 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1355 && (cur_pattern->path_id != device->target->bus->path_id)) 1356 continue; 1357 1358 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1359 && (cur_pattern->target_id != device->target->target_id)) 1360 continue; 1361 1362 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1363 && (cur_pattern->target_lun != device->lun_id)) 1364 continue; 1365 1366 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1367 && (cam_quirkmatch((caddr_t)&device->inq_data, 1368 (caddr_t)&cur_pattern->data.inq_pat, 1369 1, sizeof(cur_pattern->data.inq_pat), 1370 scsi_static_inquiry_match) == NULL)) 1371 continue; 1372 1373 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1374 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1375 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1376 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1377 device->device_id_len 1378 - SVPD_DEVICE_ID_HDR_LEN, 1379 cur_pattern->data.devid_pat.id, 1380 cur_pattern->data.devid_pat.id_len) != 0)) 1381 continue; 1382 1383 copy_dev_node: 1384 /* 1385 * If we get to this point, the user definitely wants 1386 * information on this device. So tell the caller to copy 1387 * the data out. 1388 */ 1389 retval |= DM_RET_COPY; 1390 1391 /* 1392 * If the return action has been set to descend, then we 1393 * know that we've already seen a peripheral matching 1394 * expression, therefore we need to further descend the tree. 1395 * This won't change by continuing around the loop, so we 1396 * go ahead and return. If we haven't seen a peripheral 1397 * matching expression, we keep going around the loop until 1398 * we exhaust the matching expressions. We'll set the stop 1399 * flag once we fall out of the loop. 1400 */ 1401 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1402 return(retval); 1403 } 1404 1405 /* 1406 * If the return action hasn't been set to descend yet, that means 1407 * we haven't seen any peripheral matching patterns. So tell the 1408 * caller to stop descending the tree -- the user doesn't want to 1409 * match against lower level tree elements. 1410 */ 1411 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1412 retval |= DM_RET_STOP; 1413 1414 return(retval); 1415 } 1416 1417 /* 1418 * Match a single peripheral against any number of match patterns. 1419 */ 1420 static dev_match_ret 1421 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1422 struct cam_periph *periph) 1423 { 1424 dev_match_ret retval; 1425 u_int i; 1426 1427 /* 1428 * If we aren't given something to match against, that's an error. 1429 */ 1430 if (periph == NULL) 1431 return(DM_RET_ERROR); 1432 1433 /* 1434 * If there are no match entries, then this peripheral matches no 1435 * matter what. 1436 */ 1437 if ((patterns == NULL) || (num_patterns == 0)) 1438 return(DM_RET_STOP | DM_RET_COPY); 1439 1440 /* 1441 * There aren't any nodes below a peripheral node, so there's no 1442 * reason to descend the tree any further. 1443 */ 1444 retval = DM_RET_STOP; 1445 1446 for (i = 0; i < num_patterns; i++) { 1447 struct periph_match_pattern *cur_pattern; 1448 1449 /* 1450 * If the pattern in question isn't for a peripheral, we 1451 * aren't interested. 1452 */ 1453 if (patterns[i].type != DEV_MATCH_PERIPH) 1454 continue; 1455 1456 cur_pattern = &patterns[i].pattern.periph_pattern; 1457 1458 /* 1459 * If they want to match on anything, then we will do so. 
1460 */ 1461 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 1462 /* set the copy flag */ 1463 retval |= DM_RET_COPY; 1464 1465 /* 1466 * We've already set the return action to stop, 1467 * since there are no nodes below peripherals in 1468 * the tree. 1469 */ 1470 return(retval); 1471 } 1472 1473 /* 1474 * Not sure why someone would do this... 1475 */ 1476 if (cur_pattern->flags == PERIPH_MATCH_NONE) 1477 continue; 1478 1479 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1480 && (cur_pattern->path_id != periph->path->bus->path_id)) 1481 continue; 1482 1483 /* 1484 * For the target and lun id's, we have to make sure the 1485 * target and lun pointers aren't NULL. The xpt peripheral 1486 * has a wildcard target and device. 1487 */ 1488 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1489 && ((periph->path->target == NULL) 1490 ||(cur_pattern->target_id != periph->path->target->target_id))) 1491 continue; 1492 1493 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1494 && ((periph->path->device == NULL) 1495 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1496 continue; 1497 1498 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1499 && (cur_pattern->unit_number != periph->unit_number)) 1500 continue; 1501 1502 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1503 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1504 DEV_IDLEN) != 0)) 1505 continue; 1506 1507 /* 1508 * If we get to this point, the user definitely wants 1509 * information on this peripheral. So tell the caller to 1510 * copy the data out. 1511 */ 1512 retval |= DM_RET_COPY; 1513 1514 /* 1515 * The return action has already been set to stop, since 1516 * peripherals don't have any nodes below them in the EDT. 1517 */ 1518 return(retval); 1519 } 1520 1521 /* 1522 * If we get to this point, the peripheral that was passed in 1523 * doesn't match any of the patterns. 1524 */ 1525 return(retval); 1526 } 1527 1528 static int 1529 xptedtbusfunc(struct cam_eb *bus, void *arg) 1530 { 1531 struct ccb_dev_match *cdm; 1532 struct cam_et *target; 1533 dev_match_ret retval; 1534 1535 cdm = (struct ccb_dev_match *)arg; 1536 1537 /* 1538 * If our position is for something deeper in the tree, that means 1539 * that we've already seen this node. So, we keep going down. 1540 */ 1541 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1542 && (cdm->pos.cookie.bus == bus) 1543 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1544 && (cdm->pos.cookie.target != NULL)) 1545 retval = DM_RET_DESCEND; 1546 else 1547 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1548 1549 /* 1550 * If we got an error, bail out of the search. 1551 */ 1552 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1553 cdm->status = CAM_DEV_MATCH_ERROR; 1554 return(0); 1555 } 1556 1557 /* 1558 * If the copy flag is set, copy this bus out. 1559 */ 1560 if (retval & DM_RET_COPY) { 1561 int spaceleft, j; 1562 1563 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1564 sizeof(struct dev_match_result)); 1565 1566 /* 1567 * If we don't have enough space to put in another 1568 * match result, save our position and tell the 1569 * user there are more devices to check. 
1570 */ 1571 if (spaceleft < sizeof(struct dev_match_result)) { 1572 bzero(&cdm->pos, sizeof(cdm->pos)); 1573 cdm->pos.position_type = 1574 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1575 1576 cdm->pos.cookie.bus = bus; 1577 cdm->pos.generations[CAM_BUS_GENERATION]= 1578 xsoftc.bus_generation; 1579 cdm->status = CAM_DEV_MATCH_MORE; 1580 return(0); 1581 } 1582 j = cdm->num_matches; 1583 cdm->num_matches++; 1584 cdm->matches[j].type = DEV_MATCH_BUS; 1585 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1586 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1587 cdm->matches[j].result.bus_result.unit_number = 1588 bus->sim->unit_number; 1589 strncpy(cdm->matches[j].result.bus_result.dev_name, 1590 bus->sim->sim_name, DEV_IDLEN); 1591 } 1592 1593 /* 1594 * If the user is only interested in busses, there's no 1595 * reason to descend to the next level in the tree. 1596 */ 1597 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1598 return(1); 1599 1600 /* 1601 * If there is a target generation recorded, check it to 1602 * make sure the target list hasn't changed. 1603 */ 1604 mtx_lock(&bus->eb_mtx); 1605 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1606 && (cdm->pos.cookie.bus == bus) 1607 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1608 && (cdm->pos.cookie.target != NULL)) { 1609 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1610 bus->generation)) { 1611 mtx_unlock(&bus->eb_mtx); 1612 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1613 return (0); 1614 } 1615 target = (struct cam_et *)cdm->pos.cookie.target; 1616 target->refcount++; 1617 } else 1618 target = NULL; 1619 mtx_unlock(&bus->eb_mtx); 1620 1621 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1622 } 1623 1624 static int 1625 xptedttargetfunc(struct cam_et *target, void *arg) 1626 { 1627 struct ccb_dev_match *cdm; 1628 struct cam_eb *bus; 1629 struct cam_ed *device; 1630 1631 cdm = (struct ccb_dev_match *)arg; 1632 bus = target->bus; 1633 1634 /* 1635 * If there is a device list generation recorded, check it to 1636 * make sure the device list hasn't changed. 1637 */ 1638 mtx_lock(&bus->eb_mtx); 1639 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1640 && (cdm->pos.cookie.bus == bus) 1641 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1642 && (cdm->pos.cookie.target == target) 1643 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1644 && (cdm->pos.cookie.device != NULL)) { 1645 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1646 target->generation) { 1647 mtx_unlock(&bus->eb_mtx); 1648 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1649 return(0); 1650 } 1651 device = (struct cam_ed *)cdm->pos.cookie.device; 1652 device->refcount++; 1653 } else 1654 device = NULL; 1655 mtx_unlock(&bus->eb_mtx); 1656 1657 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1658 } 1659 1660 static int 1661 xptedtdevicefunc(struct cam_ed *device, void *arg) 1662 { 1663 struct cam_eb *bus; 1664 struct cam_periph *periph; 1665 struct ccb_dev_match *cdm; 1666 dev_match_ret retval; 1667 1668 cdm = (struct ccb_dev_match *)arg; 1669 bus = device->target->bus; 1670 1671 /* 1672 * If our position is for something deeper in the tree, that means 1673 * that we've already seen this node. So, we keep going down. 
1674 */ 1675 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1676 && (cdm->pos.cookie.device == device) 1677 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1678 && (cdm->pos.cookie.periph != NULL)) 1679 retval = DM_RET_DESCEND; 1680 else 1681 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1682 device); 1683 1684 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1685 cdm->status = CAM_DEV_MATCH_ERROR; 1686 return(0); 1687 } 1688 1689 /* 1690 * If the copy flag is set, copy this device out. 1691 */ 1692 if (retval & DM_RET_COPY) { 1693 int spaceleft, j; 1694 1695 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1696 sizeof(struct dev_match_result)); 1697 1698 /* 1699 * If we don't have enough space to put in another 1700 * match result, save our position and tell the 1701 * user there are more devices to check. 1702 */ 1703 if (spaceleft < sizeof(struct dev_match_result)) { 1704 bzero(&cdm->pos, sizeof(cdm->pos)); 1705 cdm->pos.position_type = 1706 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1707 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1708 1709 cdm->pos.cookie.bus = device->target->bus; 1710 cdm->pos.generations[CAM_BUS_GENERATION]= 1711 xsoftc.bus_generation; 1712 cdm->pos.cookie.target = device->target; 1713 cdm->pos.generations[CAM_TARGET_GENERATION] = 1714 device->target->bus->generation; 1715 cdm->pos.cookie.device = device; 1716 cdm->pos.generations[CAM_DEV_GENERATION] = 1717 device->target->generation; 1718 cdm->status = CAM_DEV_MATCH_MORE; 1719 return(0); 1720 } 1721 j = cdm->num_matches; 1722 cdm->num_matches++; 1723 cdm->matches[j].type = DEV_MATCH_DEVICE; 1724 cdm->matches[j].result.device_result.path_id = 1725 device->target->bus->path_id; 1726 cdm->matches[j].result.device_result.target_id = 1727 device->target->target_id; 1728 cdm->matches[j].result.device_result.target_lun = 1729 device->lun_id; 1730 cdm->matches[j].result.device_result.protocol = 1731 device->protocol; 1732 bcopy(&device->inq_data, 1733 &cdm->matches[j].result.device_result.inq_data, 1734 sizeof(struct scsi_inquiry_data)); 1735 bcopy(&device->ident_data, 1736 &cdm->matches[j].result.device_result.ident_data, 1737 sizeof(struct ata_params)); 1738 1739 /* Let the user know whether this device is unconfigured */ 1740 if (device->flags & CAM_DEV_UNCONFIGURED) 1741 cdm->matches[j].result.device_result.flags = 1742 DEV_RESULT_UNCONFIGURED; 1743 else 1744 cdm->matches[j].result.device_result.flags = 1745 DEV_RESULT_NOFLAG; 1746 } 1747 1748 /* 1749 * If the user isn't interested in peripherals, don't descend 1750 * the tree any further. 1751 */ 1752 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1753 return(1); 1754 1755 /* 1756 * If there is a peripheral list generation recorded, make sure 1757 * it hasn't changed. 
1758 */ 1759 xpt_lock_buses(); 1760 mtx_lock(&bus->eb_mtx); 1761 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1762 && (cdm->pos.cookie.bus == bus) 1763 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1764 && (cdm->pos.cookie.target == device->target) 1765 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1766 && (cdm->pos.cookie.device == device) 1767 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1768 && (cdm->pos.cookie.periph != NULL)) { 1769 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1770 device->generation) { 1771 mtx_unlock(&bus->eb_mtx); 1772 xpt_unlock_buses(); 1773 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1774 return(0); 1775 } 1776 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1777 periph->refcount++; 1778 } else 1779 periph = NULL; 1780 mtx_unlock(&bus->eb_mtx); 1781 xpt_unlock_buses(); 1782 1783 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1784 } 1785 1786 static int 1787 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1788 { 1789 struct ccb_dev_match *cdm; 1790 dev_match_ret retval; 1791 1792 cdm = (struct ccb_dev_match *)arg; 1793 1794 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1795 1796 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1797 cdm->status = CAM_DEV_MATCH_ERROR; 1798 return(0); 1799 } 1800 1801 /* 1802 * If the copy flag is set, copy this peripheral out. 1803 */ 1804 if (retval & DM_RET_COPY) { 1805 int spaceleft, j; 1806 1807 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1808 sizeof(struct dev_match_result)); 1809 1810 /* 1811 * If we don't have enough space to put in another 1812 * match result, save our position and tell the 1813 * user there are more devices to check. 1814 */ 1815 if (spaceleft < sizeof(struct dev_match_result)) { 1816 bzero(&cdm->pos, sizeof(cdm->pos)); 1817 cdm->pos.position_type = 1818 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1819 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1820 CAM_DEV_POS_PERIPH; 1821 1822 cdm->pos.cookie.bus = periph->path->bus; 1823 cdm->pos.generations[CAM_BUS_GENERATION]= 1824 xsoftc.bus_generation; 1825 cdm->pos.cookie.target = periph->path->target; 1826 cdm->pos.generations[CAM_TARGET_GENERATION] = 1827 periph->path->bus->generation; 1828 cdm->pos.cookie.device = periph->path->device; 1829 cdm->pos.generations[CAM_DEV_GENERATION] = 1830 periph->path->target->generation; 1831 cdm->pos.cookie.periph = periph; 1832 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1833 periph->path->device->generation; 1834 cdm->status = CAM_DEV_MATCH_MORE; 1835 return(0); 1836 } 1837 1838 j = cdm->num_matches; 1839 cdm->num_matches++; 1840 cdm->matches[j].type = DEV_MATCH_PERIPH; 1841 cdm->matches[j].result.periph_result.path_id = 1842 periph->path->bus->path_id; 1843 cdm->matches[j].result.periph_result.target_id = 1844 periph->path->target->target_id; 1845 cdm->matches[j].result.periph_result.target_lun = 1846 periph->path->device->lun_id; 1847 cdm->matches[j].result.periph_result.unit_number = 1848 periph->unit_number; 1849 strncpy(cdm->matches[j].result.periph_result.periph_name, 1850 periph->periph_name, DEV_IDLEN); 1851 } 1852 1853 return(1); 1854 } 1855 1856 static int 1857 xptedtmatch(struct ccb_dev_match *cdm) 1858 { 1859 struct cam_eb *bus; 1860 int ret; 1861 1862 cdm->num_matches = 0; 1863 1864 /* 1865 * Check the bus list generation. If it has changed, the user 1866 * needs to reset everything and start over. 
1867 */ 1868 xpt_lock_buses(); 1869 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1870 && (cdm->pos.cookie.bus != NULL)) { 1871 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1872 xsoftc.bus_generation) { 1873 xpt_unlock_buses(); 1874 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1875 return(0); 1876 } 1877 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1878 bus->refcount++; 1879 } else 1880 bus = NULL; 1881 xpt_unlock_buses(); 1882 1883 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1884 1885 /* 1886 * If we get back 0, that means that we had to stop before fully 1887 * traversing the EDT. It also means that one of the subroutines 1888 * has set the status field to the proper value. If we get back 1, 1889 * we've fully traversed the EDT and copied out any matching entries. 1890 */ 1891 if (ret == 1) 1892 cdm->status = CAM_DEV_MATCH_LAST; 1893 1894 return(ret); 1895 } 1896 1897 static int 1898 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1899 { 1900 struct cam_periph *periph; 1901 struct ccb_dev_match *cdm; 1902 1903 cdm = (struct ccb_dev_match *)arg; 1904 1905 xpt_lock_buses(); 1906 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1907 && (cdm->pos.cookie.pdrv == pdrv) 1908 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1909 && (cdm->pos.cookie.periph != NULL)) { 1910 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1911 (*pdrv)->generation) { 1912 xpt_unlock_buses(); 1913 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1914 return(0); 1915 } 1916 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1917 periph->refcount++; 1918 } else 1919 periph = NULL; 1920 xpt_unlock_buses(); 1921 1922 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1923 } 1924 1925 static int 1926 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1927 { 1928 struct ccb_dev_match *cdm; 1929 dev_match_ret retval; 1930 1931 cdm = (struct ccb_dev_match *)arg; 1932 1933 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1934 1935 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1936 cdm->status = CAM_DEV_MATCH_ERROR; 1937 return(0); 1938 } 1939 1940 /* 1941 * If the copy flag is set, copy this peripheral out. 1942 */ 1943 if (retval & DM_RET_COPY) { 1944 int spaceleft, j; 1945 1946 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1947 sizeof(struct dev_match_result)); 1948 1949 /* 1950 * If we don't have enough space to put in another 1951 * match result, save our position and tell the 1952 * user there are more devices to check. 1953 */ 1954 if (spaceleft < sizeof(struct dev_match_result)) { 1955 struct periph_driver **pdrv; 1956 1957 pdrv = NULL; 1958 bzero(&cdm->pos, sizeof(cdm->pos)); 1959 cdm->pos.position_type = 1960 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 1961 CAM_DEV_POS_PERIPH; 1962 1963 /* 1964 * This may look a bit non-sensical, but it is 1965 * actually quite logical. There are very few 1966 * peripheral drivers, and bloating every peripheral 1967 * structure with a pointer back to its parent 1968 * peripheral driver linker set entry would cost 1969 * more in the long run than doing this quick lookup. 1970 */ 1971 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 1972 if (strcmp((*pdrv)->driver_name, 1973 periph->periph_name) == 0) 1974 break; 1975 } 1976 1977 if (*pdrv == NULL) { 1978 cdm->status = CAM_DEV_MATCH_ERROR; 1979 return(0); 1980 } 1981 1982 cdm->pos.cookie.pdrv = pdrv; 1983 /* 1984 * The periph generation slot does double duty, as 1985 * does the periph pointer slot. They are used for 1986 * both edt and pdrv lookups and positioning. 
1987 */ 1988 cdm->pos.cookie.periph = periph; 1989 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1990 (*pdrv)->generation; 1991 cdm->status = CAM_DEV_MATCH_MORE; 1992 return(0); 1993 } 1994 1995 j = cdm->num_matches; 1996 cdm->num_matches++; 1997 cdm->matches[j].type = DEV_MATCH_PERIPH; 1998 cdm->matches[j].result.periph_result.path_id = 1999 periph->path->bus->path_id; 2000 2001 /* 2002 * The transport layer peripheral doesn't have a target or 2003 * lun. 2004 */ 2005 if (periph->path->target) 2006 cdm->matches[j].result.periph_result.target_id = 2007 periph->path->target->target_id; 2008 else 2009 cdm->matches[j].result.periph_result.target_id = 2010 CAM_TARGET_WILDCARD; 2011 2012 if (periph->path->device) 2013 cdm->matches[j].result.periph_result.target_lun = 2014 periph->path->device->lun_id; 2015 else 2016 cdm->matches[j].result.periph_result.target_lun = 2017 CAM_LUN_WILDCARD; 2018 2019 cdm->matches[j].result.periph_result.unit_number = 2020 periph->unit_number; 2021 strncpy(cdm->matches[j].result.periph_result.periph_name, 2022 periph->periph_name, DEV_IDLEN); 2023 } 2024 2025 return(1); 2026 } 2027 2028 static int 2029 xptperiphlistmatch(struct ccb_dev_match *cdm) 2030 { 2031 int ret; 2032 2033 cdm->num_matches = 0; 2034 2035 /* 2036 * At this point in the edt traversal function, we check the bus 2037 * list generation to make sure that no busses have been added or 2038 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2039 * For the peripheral driver list traversal function, however, we 2040 * don't have to worry about new peripheral driver types coming or 2041 * going; they're in a linker set, and therefore can't change 2042 * without a recompile. 2043 */ 2044 2045 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2046 && (cdm->pos.cookie.pdrv != NULL)) 2047 ret = xptpdrvtraverse( 2048 (struct periph_driver **)cdm->pos.cookie.pdrv, 2049 xptplistpdrvfunc, cdm); 2050 else 2051 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2052 2053 /* 2054 * If we get back 0, that means that we had to stop before fully 2055 * traversing the peripheral driver tree. It also means that one of 2056 * the subroutines has set the status field to the proper value. If 2057 * we get back 1, we've fully traversed the EDT and copied out any 2058 * matching entries. 
2059 */ 2060 if (ret == 1) 2061 cdm->status = CAM_DEV_MATCH_LAST; 2062 2063 return(ret); 2064 } 2065 2066 static int 2067 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2068 { 2069 struct cam_eb *bus, *next_bus; 2070 int retval; 2071 2072 retval = 1; 2073 if (start_bus) 2074 bus = start_bus; 2075 else { 2076 xpt_lock_buses(); 2077 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2078 if (bus == NULL) { 2079 xpt_unlock_buses(); 2080 return (retval); 2081 } 2082 bus->refcount++; 2083 xpt_unlock_buses(); 2084 } 2085 for (; bus != NULL; bus = next_bus) { 2086 retval = tr_func(bus, arg); 2087 if (retval == 0) { 2088 xpt_release_bus(bus); 2089 break; 2090 } 2091 xpt_lock_buses(); 2092 next_bus = TAILQ_NEXT(bus, links); 2093 if (next_bus) 2094 next_bus->refcount++; 2095 xpt_unlock_buses(); 2096 xpt_release_bus(bus); 2097 } 2098 return(retval); 2099 } 2100 2101 static int 2102 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2103 xpt_targetfunc_t *tr_func, void *arg) 2104 { 2105 struct cam_et *target, *next_target; 2106 int retval; 2107 2108 retval = 1; 2109 if (start_target) 2110 target = start_target; 2111 else { 2112 mtx_lock(&bus->eb_mtx); 2113 target = TAILQ_FIRST(&bus->et_entries); 2114 if (target == NULL) { 2115 mtx_unlock(&bus->eb_mtx); 2116 return (retval); 2117 } 2118 target->refcount++; 2119 mtx_unlock(&bus->eb_mtx); 2120 } 2121 for (; target != NULL; target = next_target) { 2122 retval = tr_func(target, arg); 2123 if (retval == 0) { 2124 xpt_release_target(target); 2125 break; 2126 } 2127 mtx_lock(&bus->eb_mtx); 2128 next_target = TAILQ_NEXT(target, links); 2129 if (next_target) 2130 next_target->refcount++; 2131 mtx_unlock(&bus->eb_mtx); 2132 xpt_release_target(target); 2133 } 2134 return(retval); 2135 } 2136 2137 static int 2138 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2139 xpt_devicefunc_t *tr_func, void *arg) 2140 { 2141 struct cam_eb *bus; 2142 struct cam_ed *device, *next_device; 2143 int retval; 2144 2145 retval = 1; 2146 bus = target->bus; 2147 if (start_device) 2148 device = start_device; 2149 else { 2150 mtx_lock(&bus->eb_mtx); 2151 device = TAILQ_FIRST(&target->ed_entries); 2152 if (device == NULL) { 2153 mtx_unlock(&bus->eb_mtx); 2154 return (retval); 2155 } 2156 device->refcount++; 2157 mtx_unlock(&bus->eb_mtx); 2158 } 2159 for (; device != NULL; device = next_device) { 2160 mtx_lock(&device->device_mtx); 2161 retval = tr_func(device, arg); 2162 mtx_unlock(&device->device_mtx); 2163 if (retval == 0) { 2164 xpt_release_device(device); 2165 break; 2166 } 2167 mtx_lock(&bus->eb_mtx); 2168 next_device = TAILQ_NEXT(device, links); 2169 if (next_device) 2170 next_device->refcount++; 2171 mtx_unlock(&bus->eb_mtx); 2172 xpt_release_device(device); 2173 } 2174 return(retval); 2175 } 2176 2177 static int 2178 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2179 xpt_periphfunc_t *tr_func, void *arg) 2180 { 2181 struct cam_eb *bus; 2182 struct cam_periph *periph, *next_periph; 2183 int retval; 2184 2185 retval = 1; 2186 2187 bus = device->target->bus; 2188 if (start_periph) 2189 periph = start_periph; 2190 else { 2191 xpt_lock_buses(); 2192 mtx_lock(&bus->eb_mtx); 2193 periph = SLIST_FIRST(&device->periphs); 2194 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2195 periph = SLIST_NEXT(periph, periph_links); 2196 if (periph == NULL) { 2197 mtx_unlock(&bus->eb_mtx); 2198 xpt_unlock_buses(); 2199 return (retval); 2200 } 2201 periph->refcount++; 2202 mtx_unlock(&bus->eb_mtx); 2203 
xpt_unlock_buses(); 2204 } 2205 for (; periph != NULL; periph = next_periph) { 2206 retval = tr_func(periph, arg); 2207 if (retval == 0) { 2208 cam_periph_release_locked(periph); 2209 break; 2210 } 2211 xpt_lock_buses(); 2212 mtx_lock(&bus->eb_mtx); 2213 next_periph = SLIST_NEXT(periph, periph_links); 2214 while (next_periph != NULL && 2215 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2216 next_periph = SLIST_NEXT(next_periph, periph_links); 2217 if (next_periph) 2218 next_periph->refcount++; 2219 mtx_unlock(&bus->eb_mtx); 2220 xpt_unlock_buses(); 2221 cam_periph_release_locked(periph); 2222 } 2223 return(retval); 2224 } 2225 2226 static int 2227 xptpdrvtraverse(struct periph_driver **start_pdrv, 2228 xpt_pdrvfunc_t *tr_func, void *arg) 2229 { 2230 struct periph_driver **pdrv; 2231 int retval; 2232 2233 retval = 1; 2234 2235 /* 2236 * We don't traverse the peripheral driver list like we do the 2237 * other lists, because it is a linker set, and therefore cannot be 2238 * changed during runtime. If the peripheral driver list is ever 2239 * re-done to be something other than a linker set (i.e. it can 2240 * change while the system is running), the list traversal should 2241 * be modified to work like the other traversal functions. 2242 */ 2243 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2244 *pdrv != NULL; pdrv++) { 2245 retval = tr_func(pdrv, arg); 2246 2247 if (retval == 0) 2248 return(retval); 2249 } 2250 2251 return(retval); 2252 } 2253 2254 static int 2255 xptpdperiphtraverse(struct periph_driver **pdrv, 2256 struct cam_periph *start_periph, 2257 xpt_periphfunc_t *tr_func, void *arg) 2258 { 2259 struct cam_periph *periph, *next_periph; 2260 int retval; 2261 2262 retval = 1; 2263 2264 if (start_periph) 2265 periph = start_periph; 2266 else { 2267 xpt_lock_buses(); 2268 periph = TAILQ_FIRST(&(*pdrv)->units); 2269 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2270 periph = TAILQ_NEXT(periph, unit_links); 2271 if (periph == NULL) { 2272 xpt_unlock_buses(); 2273 return (retval); 2274 } 2275 periph->refcount++; 2276 xpt_unlock_buses(); 2277 } 2278 for (; periph != NULL; periph = next_periph) { 2279 cam_periph_lock(periph); 2280 retval = tr_func(periph, arg); 2281 cam_periph_unlock(periph); 2282 if (retval == 0) { 2283 cam_periph_release(periph); 2284 break; 2285 } 2286 xpt_lock_buses(); 2287 next_periph = TAILQ_NEXT(periph, unit_links); 2288 while (next_periph != NULL && 2289 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2290 next_periph = TAILQ_NEXT(next_periph, unit_links); 2291 if (next_periph) 2292 next_periph->refcount++; 2293 xpt_unlock_buses(); 2294 cam_periph_release(periph); 2295 } 2296 return(retval); 2297 } 2298 2299 static int 2300 xptdefbusfunc(struct cam_eb *bus, void *arg) 2301 { 2302 struct xpt_traverse_config *tr_config; 2303 2304 tr_config = (struct xpt_traverse_config *)arg; 2305 2306 if (tr_config->depth == XPT_DEPTH_BUS) { 2307 xpt_busfunc_t *tr_func; 2308 2309 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2310 2311 return(tr_func(bus, tr_config->tr_arg)); 2312 } else 2313 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2314 } 2315 2316 static int 2317 xptdeftargetfunc(struct cam_et *target, void *arg) 2318 { 2319 struct xpt_traverse_config *tr_config; 2320 2321 tr_config = (struct xpt_traverse_config *)arg; 2322 2323 if (tr_config->depth == XPT_DEPTH_TARGET) { 2324 xpt_targetfunc_t *tr_func; 2325 2326 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2327 2328 return(tr_func(target, tr_config->tr_arg)); 2329 } else 2330 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2331 } 2332 2333 static int 2334 xptdefdevicefunc(struct cam_ed *device, void *arg) 2335 { 2336 struct xpt_traverse_config *tr_config; 2337 2338 tr_config = (struct xpt_traverse_config *)arg; 2339 2340 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2341 xpt_devicefunc_t *tr_func; 2342 2343 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2344 2345 return(tr_func(device, tr_config->tr_arg)); 2346 } else 2347 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2348 } 2349 2350 static int 2351 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2352 { 2353 struct xpt_traverse_config *tr_config; 2354 xpt_periphfunc_t *tr_func; 2355 2356 tr_config = (struct xpt_traverse_config *)arg; 2357 2358 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2359 2360 /* 2361 * Unlike the other default functions, we don't check for depth 2362 * here. The peripheral driver level is the last level in the EDT, 2363 * so if we're here, we should execute the function in question. 2364 */ 2365 return(tr_func(periph, tr_config->tr_arg)); 2366 } 2367 2368 /* 2369 * Execute the given function for every bus in the EDT. 2370 */ 2371 static int 2372 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2373 { 2374 struct xpt_traverse_config tr_config; 2375 2376 tr_config.depth = XPT_DEPTH_BUS; 2377 tr_config.tr_func = tr_func; 2378 tr_config.tr_arg = arg; 2379 2380 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2381 } 2382 2383 /* 2384 * Execute the given function for every device in the EDT. 2385 */ 2386 static int 2387 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2388 { 2389 struct xpt_traverse_config tr_config; 2390 2391 tr_config.depth = XPT_DEPTH_DEVICE; 2392 tr_config.tr_func = tr_func; 2393 tr_config.tr_arg = arg; 2394 2395 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2396 } 2397 2398 static int 2399 xptsetasyncfunc(struct cam_ed *device, void *arg) 2400 { 2401 struct cam_path path; 2402 struct ccb_getdev cgd; 2403 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2404 2405 /* 2406 * Don't report unconfigured devices (Wildcard devs, 2407 * devices only for target mode, device instances 2408 * that have been invalidated but are waiting for 2409 * their last reference count to be released). 
2410 */ 2411 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2412 return (1); 2413 2414 xpt_compile_path(&path, 2415 NULL, 2416 device->target->bus->path_id, 2417 device->target->target_id, 2418 device->lun_id); 2419 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2420 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2421 xpt_action((union ccb *)&cgd); 2422 csa->callback(csa->callback_arg, 2423 AC_FOUND_DEVICE, 2424 &path, &cgd); 2425 xpt_release_path(&path); 2426 2427 return(1); 2428 } 2429 2430 static int 2431 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2432 { 2433 struct cam_path path; 2434 struct ccb_pathinq cpi; 2435 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2436 2437 xpt_compile_path(&path, /*periph*/NULL, 2438 bus->path_id, 2439 CAM_TARGET_WILDCARD, 2440 CAM_LUN_WILDCARD); 2441 xpt_path_lock(&path); 2442 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2443 cpi.ccb_h.func_code = XPT_PATH_INQ; 2444 xpt_action((union ccb *)&cpi); 2445 csa->callback(csa->callback_arg, 2446 AC_PATH_REGISTERED, 2447 &path, &cpi); 2448 xpt_path_unlock(&path); 2449 xpt_release_path(&path); 2450 2451 return(1); 2452 } 2453 2454 void 2455 xpt_action(union ccb *start_ccb) 2456 { 2457 2458 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2459 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2460 xpt_action_name(start_ccb->ccb_h.func_code))); 2461 2462 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2463 (*(start_ccb->ccb_h.path->bus->xport->action))(start_ccb); 2464 } 2465 2466 void 2467 xpt_action_default(union ccb *start_ccb) 2468 { 2469 struct cam_path *path; 2470 struct cam_sim *sim; 2471 int lock; 2472 2473 path = start_ccb->ccb_h.path; 2474 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2475 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2476 xpt_action_name(start_ccb->ccb_h.func_code))); 2477 2478 switch (start_ccb->ccb_h.func_code) { 2479 case XPT_SCSI_IO: 2480 { 2481 struct cam_ed *device; 2482 2483 /* 2484 * For the sake of compatibility with SCSI-1 2485 * devices that may not understand the identify 2486 * message, we include lun information in the 2487 * second byte of all commands. SCSI-1 specifies 2488 * that luns are a 3 bit value and reserves only 3 2489 * bits for lun information in the CDB. Later 2490 * revisions of the SCSI spec allow for more than 8 2491 * luns, but have deprecated lun information in the 2492 * CDB. So, if the lun won't fit, we must omit. 2493 * 2494 * Also be aware that during initial probing for devices, 2495 * the inquiry information is unknown but initialized to 0. 2496 * This means that this code will be exercised while probing 2497 * devices with an ANSI revision greater than 2. 
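 *
 * As a worked example (illustrative only): for a device reporting a
 * protocol version <= SCSI_REV_2 at LUN 2, the code below ORs
 * (2 << 5) == 0x40 into CDB byte 1, so a READ(6) built as
 * 08 00 00 00 01 00 goes out on the wire as 08 40 00 00 01 00.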
2498 */
2499 device = path->device;
2500 if (device->protocol_version <= SCSI_REV_2
2501 && start_ccb->ccb_h.target_lun < 8
2502 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2503
2504 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2505 start_ccb->ccb_h.target_lun << 5;
2506 }
2507 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2508 }
2509 /* FALLTHROUGH */
2510 case XPT_TARGET_IO:
2511 case XPT_CONT_TARGET_IO:
2512 start_ccb->csio.sense_resid = 0;
2513 start_ccb->csio.resid = 0;
2514 /* FALLTHROUGH */
2515 case XPT_ATA_IO:
2516 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2517 start_ccb->ataio.resid = 0;
2518 /* FALLTHROUGH */
2519 case XPT_RESET_DEV:
2520 case XPT_ENG_EXEC:
2521 case XPT_SMP_IO:
2522 {
2523 struct cam_devq *devq;
2524
2525 devq = path->bus->sim->devq;
2526 mtx_lock(&devq->send_mtx);
2527 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2528 if (xpt_schedule_devq(devq, path->device) != 0)
2529 xpt_run_devq(devq);
2530 mtx_unlock(&devq->send_mtx);
2531 break;
2532 }
2533 case XPT_CALC_GEOMETRY:
2534 /* Filter out garbage */
2535 if (start_ccb->ccg.block_size == 0
2536 || start_ccb->ccg.volume_size == 0) {
2537 start_ccb->ccg.cylinders = 0;
2538 start_ccb->ccg.heads = 0;
2539 start_ccb->ccg.secs_per_track = 0;
2540 start_ccb->ccb_h.status = CAM_REQ_CMP;
2541 break;
2542 }
2543 #if defined(PC98) || defined(__sparc64__)
2544 /*
2545 * In a PC-98 system, geometry translation depends on
2546 * the "real" device geometry obtained from mode page 4.
2547 * SCSI geometry translation is performed in the
2548 * initialization routine of the SCSI BIOS and the result
2549 * stored in host memory. If the translation is available
2550 * in host memory, use it. If not, rely on the default
2551 * translation the device driver performs.
2552 * For sparc64, we may need to adjust the geometry of large
2553 * disks in order to fit the limitations of the 16-bit
2554 * fields of the VTOC8 disk label.
2555 */
2556 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2557 start_ccb->ccb_h.status = CAM_REQ_CMP;
2558 break;
2559 }
2560 #endif
2561 goto call_sim;
2562 case XPT_ABORT:
2563 {
2564 union ccb* abort_ccb;
2565
2566 abort_ccb = start_ccb->cab.abort_ccb;
2567 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2568
2569 if (abort_ccb->ccb_h.pinfo.index >= 0) {
2570 struct cam_ccbq *ccbq;
2571 struct cam_ed *device;
2572
2573 device = abort_ccb->ccb_h.path->device;
2574 ccbq = &device->ccbq;
2575 cam_ccbq_remove_ccb(ccbq, abort_ccb);
2576 abort_ccb->ccb_h.status =
2577 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2578 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2579 xpt_done(abort_ccb);
2580 start_ccb->ccb_h.status = CAM_REQ_CMP;
2581 break;
2582 }
2583 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2584 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2585 /*
2586 * We've caught this ccb en route to
2587 * the SIM. Flag it for abort and the
2588 * SIM will do so just before starting
2589 * real work on the CCB.
2590 */
2591 abort_ccb->ccb_h.status =
2592 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2593 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
2594 start_ccb->ccb_h.status = CAM_REQ_CMP;
2595 break;
2596 }
2597 }
2598 if (XPT_FC_IS_QUEUED(abort_ccb)
2599 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
2600 /*
2601 * It's already completed but waiting
2602 * for our SWI to get to it.
2603 */
2604 start_ccb->ccb_h.status = CAM_UA_ABORT;
2605 break;
2606 }
2607 /*
2608 * If we weren't able to take care of the abort request
2609 * in the XPT, pass the request down to the SIM for processing.
2610 */ 2611 } 2612 /* FALLTHROUGH */ 2613 case XPT_ACCEPT_TARGET_IO: 2614 case XPT_EN_LUN: 2615 case XPT_IMMED_NOTIFY: 2616 case XPT_NOTIFY_ACK: 2617 case XPT_RESET_BUS: 2618 case XPT_IMMEDIATE_NOTIFY: 2619 case XPT_NOTIFY_ACKNOWLEDGE: 2620 case XPT_GET_SIM_KNOB_OLD: 2621 case XPT_GET_SIM_KNOB: 2622 case XPT_SET_SIM_KNOB: 2623 case XPT_GET_TRAN_SETTINGS: 2624 case XPT_SET_TRAN_SETTINGS: 2625 case XPT_PATH_INQ: 2626 call_sim: 2627 sim = path->bus->sim; 2628 lock = (mtx_owned(sim->mtx) == 0); 2629 if (lock) 2630 CAM_SIM_LOCK(sim); 2631 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2632 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2633 (*(sim->sim_action))(sim, start_ccb); 2634 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2635 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2636 if (lock) 2637 CAM_SIM_UNLOCK(sim); 2638 break; 2639 case XPT_PATH_STATS: 2640 start_ccb->cpis.last_reset = path->bus->last_reset; 2641 start_ccb->ccb_h.status = CAM_REQ_CMP; 2642 break; 2643 case XPT_GDEV_TYPE: 2644 { 2645 struct cam_ed *dev; 2646 2647 dev = path->device; 2648 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2649 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2650 } else { 2651 struct ccb_getdev *cgd; 2652 2653 cgd = &start_ccb->cgd; 2654 cgd->protocol = dev->protocol; 2655 cgd->inq_data = dev->inq_data; 2656 cgd->ident_data = dev->ident_data; 2657 cgd->inq_flags = dev->inq_flags; 2658 cgd->ccb_h.status = CAM_REQ_CMP; 2659 cgd->serial_num_len = dev->serial_num_len; 2660 if ((dev->serial_num_len > 0) 2661 && (dev->serial_num != NULL)) 2662 bcopy(dev->serial_num, cgd->serial_num, 2663 dev->serial_num_len); 2664 } 2665 break; 2666 } 2667 case XPT_GDEV_STATS: 2668 { 2669 struct cam_ed *dev; 2670 2671 dev = path->device; 2672 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2673 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2674 } else { 2675 struct ccb_getdevstats *cgds; 2676 struct cam_eb *bus; 2677 struct cam_et *tar; 2678 struct cam_devq *devq; 2679 2680 cgds = &start_ccb->cgds; 2681 bus = path->bus; 2682 tar = path->target; 2683 devq = bus->sim->devq; 2684 mtx_lock(&devq->send_mtx); 2685 cgds->dev_openings = dev->ccbq.dev_openings; 2686 cgds->dev_active = dev->ccbq.dev_active; 2687 cgds->allocated = dev->ccbq.allocated; 2688 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2689 cgds->held = cgds->allocated - cgds->dev_active - 2690 cgds->queued; 2691 cgds->last_reset = tar->last_reset; 2692 cgds->maxtags = dev->maxtags; 2693 cgds->mintags = dev->mintags; 2694 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2695 cgds->last_reset = bus->last_reset; 2696 mtx_unlock(&devq->send_mtx); 2697 cgds->ccb_h.status = CAM_REQ_CMP; 2698 } 2699 break; 2700 } 2701 case XPT_GDEVLIST: 2702 { 2703 struct cam_periph *nperiph; 2704 struct periph_list *periph_head; 2705 struct ccb_getdevlist *cgdl; 2706 u_int i; 2707 struct cam_ed *device; 2708 int found; 2709 2710 2711 found = 0; 2712 2713 /* 2714 * Don't want anyone mucking with our data. 2715 */ 2716 device = path->device; 2717 periph_head = &device->periphs; 2718 cgdl = &start_ccb->cgdl; 2719 2720 /* 2721 * Check and see if the list has changed since the user 2722 * last requested a list member. If so, tell them that the 2723 * list has changed, and therefore they need to start over 2724 * from the beginning. 
2725 */ 2726 if ((cgdl->index != 0) && 2727 (cgdl->generation != device->generation)) { 2728 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2729 break; 2730 } 2731 2732 /* 2733 * Traverse the list of peripherals and attempt to find 2734 * the requested peripheral. 2735 */ 2736 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2737 (nperiph != NULL) && (i <= cgdl->index); 2738 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2739 if (i == cgdl->index) { 2740 strncpy(cgdl->periph_name, 2741 nperiph->periph_name, 2742 DEV_IDLEN); 2743 cgdl->unit_number = nperiph->unit_number; 2744 found = 1; 2745 } 2746 } 2747 if (found == 0) { 2748 cgdl->status = CAM_GDEVLIST_ERROR; 2749 break; 2750 } 2751 2752 if (nperiph == NULL) 2753 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2754 else 2755 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2756 2757 cgdl->index++; 2758 cgdl->generation = device->generation; 2759 2760 cgdl->ccb_h.status = CAM_REQ_CMP; 2761 break; 2762 } 2763 case XPT_DEV_MATCH: 2764 { 2765 dev_pos_type position_type; 2766 struct ccb_dev_match *cdm; 2767 2768 cdm = &start_ccb->cdm; 2769 2770 /* 2771 * There are two ways of getting at information in the EDT. 2772 * The first way is via the primary EDT tree. It starts 2773 * with a list of busses, then a list of targets on a bus, 2774 * then devices/luns on a target, and then peripherals on a 2775 * device/lun. The "other" way is by the peripheral driver 2776 * lists. The peripheral driver lists are organized by 2777 * peripheral driver. (obviously) So it makes sense to 2778 * use the peripheral driver list if the user is looking 2779 * for something like "da1", or all "da" devices. If the 2780 * user is looking for something on a particular bus/target 2781 * or lun, it's generally better to go through the EDT tree. 2782 */ 2783 2784 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2785 position_type = cdm->pos.position_type; 2786 else { 2787 u_int i; 2788 2789 position_type = CAM_DEV_POS_NONE; 2790 2791 for (i = 0; i < cdm->num_patterns; i++) { 2792 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2793 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2794 position_type = CAM_DEV_POS_EDT; 2795 break; 2796 } 2797 } 2798 2799 if (cdm->num_patterns == 0) 2800 position_type = CAM_DEV_POS_EDT; 2801 else if (position_type == CAM_DEV_POS_NONE) 2802 position_type = CAM_DEV_POS_PDRV; 2803 } 2804 2805 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2806 case CAM_DEV_POS_EDT: 2807 xptedtmatch(cdm); 2808 break; 2809 case CAM_DEV_POS_PDRV: 2810 xptperiphlistmatch(cdm); 2811 break; 2812 default: 2813 cdm->status = CAM_DEV_MATCH_ERROR; 2814 break; 2815 } 2816 2817 if (cdm->status == CAM_DEV_MATCH_ERROR) 2818 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2819 else 2820 start_ccb->ccb_h.status = CAM_REQ_CMP; 2821 2822 break; 2823 } 2824 case XPT_SASYNC_CB: 2825 { 2826 struct ccb_setasync *csa; 2827 struct async_node *cur_entry; 2828 struct async_list *async_head; 2829 u_int32_t added; 2830 2831 csa = &start_ccb->csa; 2832 added = csa->event_enable; 2833 async_head = &path->device->asyncs; 2834 2835 /* 2836 * If there is already an entry for us, simply 2837 * update it. 2838 */ 2839 cur_entry = SLIST_FIRST(async_head); 2840 while (cur_entry != NULL) { 2841 if ((cur_entry->callback_arg == csa->callback_arg) 2842 && (cur_entry->callback == csa->callback)) 2843 break; 2844 cur_entry = SLIST_NEXT(cur_entry, links); 2845 } 2846 2847 if (cur_entry != NULL) { 2848 /* 2849 * If the request has no flags set, 2850 * remove the entry. 
2851 */ 2852 added &= ~cur_entry->event_enable; 2853 if (csa->event_enable == 0) { 2854 SLIST_REMOVE(async_head, cur_entry, 2855 async_node, links); 2856 xpt_release_device(path->device); 2857 free(cur_entry, M_CAMXPT); 2858 } else { 2859 cur_entry->event_enable = csa->event_enable; 2860 } 2861 csa->event_enable = added; 2862 } else { 2863 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2864 M_NOWAIT); 2865 if (cur_entry == NULL) { 2866 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2867 break; 2868 } 2869 cur_entry->event_enable = csa->event_enable; 2870 cur_entry->event_lock = 2871 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2872 cur_entry->callback_arg = csa->callback_arg; 2873 cur_entry->callback = csa->callback; 2874 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2875 xpt_acquire_device(path->device); 2876 } 2877 start_ccb->ccb_h.status = CAM_REQ_CMP; 2878 break; 2879 } 2880 case XPT_REL_SIMQ: 2881 { 2882 struct ccb_relsim *crs; 2883 struct cam_ed *dev; 2884 2885 crs = &start_ccb->crs; 2886 dev = path->device; 2887 if (dev == NULL) { 2888 2889 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2890 break; 2891 } 2892 2893 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2894 2895 /* Don't ever go below one opening */ 2896 if (crs->openings > 0) { 2897 xpt_dev_ccbq_resize(path, crs->openings); 2898 if (bootverbose) { 2899 xpt_print(path, 2900 "number of openings is now %d\n", 2901 crs->openings); 2902 } 2903 } 2904 } 2905 2906 mtx_lock(&dev->sim->devq->send_mtx); 2907 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2908 2909 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2910 2911 /* 2912 * Just extend the old timeout and decrement 2913 * the freeze count so that a single timeout 2914 * is sufficient for releasing the queue. 2915 */ 2916 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2917 callout_stop(&dev->callout); 2918 } else { 2919 2920 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2921 } 2922 2923 callout_reset_sbt(&dev->callout, 2924 SBT_1MS * crs->release_timeout, 0, 2925 xpt_release_devq_timeout, dev, 0); 2926 2927 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2928 2929 } 2930 2931 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2932 2933 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2934 /* 2935 * Decrement the freeze count so that a single 2936 * completion is still sufficient to unfreeze 2937 * the queue. 2938 */ 2939 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2940 } else { 2941 2942 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2943 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2944 } 2945 } 2946 2947 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2948 2949 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2950 || (dev->ccbq.dev_active == 0)) { 2951 2952 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2953 } else { 2954 2955 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2956 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2957 } 2958 } 2959 mtx_unlock(&dev->sim->devq->send_mtx); 2960 2961 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2962 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2963 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2964 start_ccb->ccb_h.status = CAM_REQ_CMP; 2965 break; 2966 } 2967 case XPT_DEBUG: { 2968 struct cam_path *oldpath; 2969 2970 /* Check that all request bits are supported. 
*/ 2971 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 2972 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2973 break; 2974 } 2975 2976 cam_dflags = CAM_DEBUG_NONE; 2977 if (cam_dpath != NULL) { 2978 oldpath = cam_dpath; 2979 cam_dpath = NULL; 2980 xpt_free_path(oldpath); 2981 } 2982 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 2983 if (xpt_create_path(&cam_dpath, NULL, 2984 start_ccb->ccb_h.path_id, 2985 start_ccb->ccb_h.target_id, 2986 start_ccb->ccb_h.target_lun) != 2987 CAM_REQ_CMP) { 2988 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 2989 } else { 2990 cam_dflags = start_ccb->cdbg.flags; 2991 start_ccb->ccb_h.status = CAM_REQ_CMP; 2992 xpt_print(cam_dpath, "debugging flags now %x\n", 2993 cam_dflags); 2994 } 2995 } else 2996 start_ccb->ccb_h.status = CAM_REQ_CMP; 2997 break; 2998 } 2999 case XPT_NOOP: 3000 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3001 xpt_freeze_devq(path, 1); 3002 start_ccb->ccb_h.status = CAM_REQ_CMP; 3003 break; 3004 case XPT_REPROBE_LUN: 3005 xpt_async(AC_INQ_CHANGED, path, NULL); 3006 start_ccb->ccb_h.status = CAM_REQ_CMP; 3007 xpt_done(start_ccb); 3008 break; 3009 default: 3010 case XPT_SDEV_TYPE: 3011 case XPT_TERM_IO: 3012 case XPT_ENG_INQ: 3013 /* XXX Implement */ 3014 printf("%s: CCB type %#x not supported\n", __func__, 3015 start_ccb->ccb_h.func_code); 3016 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3017 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3018 xpt_done(start_ccb); 3019 } 3020 break; 3021 } 3022 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3023 ("xpt_action_default: func= %#x %s status %#x\n", 3024 start_ccb->ccb_h.func_code, 3025 xpt_action_name(start_ccb->ccb_h.func_code), 3026 start_ccb->ccb_h.status)); 3027 } 3028 3029 void 3030 xpt_polled_action(union ccb *start_ccb) 3031 { 3032 u_int32_t timeout; 3033 struct cam_sim *sim; 3034 struct cam_devq *devq; 3035 struct cam_ed *dev; 3036 3037 timeout = start_ccb->ccb_h.timeout * 10; 3038 sim = start_ccb->ccb_h.path->bus->sim; 3039 devq = sim->devq; 3040 dev = start_ccb->ccb_h.path->device; 3041 3042 mtx_unlock(&dev->device_mtx); 3043 3044 /* 3045 * Steal an opening so that no other queued requests 3046 * can get it before us while we simulate interrupts. 3047 */ 3048 mtx_lock(&devq->send_mtx); 3049 dev->ccbq.dev_openings--; 3050 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3051 (--timeout > 0)) { 3052 mtx_unlock(&devq->send_mtx); 3053 DELAY(100); 3054 CAM_SIM_LOCK(sim); 3055 (*(sim->sim_poll))(sim); 3056 CAM_SIM_UNLOCK(sim); 3057 camisr_runqueue(); 3058 mtx_lock(&devq->send_mtx); 3059 } 3060 dev->ccbq.dev_openings++; 3061 mtx_unlock(&devq->send_mtx); 3062 3063 if (timeout != 0) { 3064 xpt_action(start_ccb); 3065 while(--timeout > 0) { 3066 CAM_SIM_LOCK(sim); 3067 (*(sim->sim_poll))(sim); 3068 CAM_SIM_UNLOCK(sim); 3069 camisr_runqueue(); 3070 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3071 != CAM_REQ_INPROG) 3072 break; 3073 DELAY(100); 3074 } 3075 if (timeout == 0) { 3076 /* 3077 * XXX Is it worth adding a sim_timeout entry 3078 * point so we can attempt recovery? If 3079 * this is only used for dumps, I don't think 3080 * it is. 3081 */ 3082 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3083 } 3084 } else { 3085 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3086 } 3087 3088 mtx_lock(&dev->device_mtx); 3089 } 3090 3091 /* 3092 * Schedule a peripheral driver to receive a ccb when its 3093 * target device has space for more transactions. 
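 *
 * A peripheral driver normally calls this from its strategy-style path
 * after queueing new work, and later receives a CCB through its
 * periph_start() callback.  A minimal sketch (field names such as
 * softc->bio_queue are illustrative):
 *
 *	cam_periph_lock(periph);
 *	bioq_insert_tail(&softc->bio_queue, bp);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *	cam_periph_unlock(periph);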
3094 */ 3095 void 3096 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3097 { 3098 3099 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3100 cam_periph_assert(periph, MA_OWNED); 3101 if (new_priority < periph->scheduled_priority) { 3102 periph->scheduled_priority = new_priority; 3103 xpt_run_allocq(periph, 0); 3104 } 3105 } 3106 3107 3108 /* 3109 * Schedule a device to run on a given queue. 3110 * If the device was inserted as a new entry on the queue, 3111 * return 1 meaning the device queue should be run. If we 3112 * were already queued, implying someone else has already 3113 * started the queue, return 0 so the caller doesn't attempt 3114 * to run the queue. 3115 */ 3116 static int 3117 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3118 u_int32_t new_priority) 3119 { 3120 int retval; 3121 u_int32_t old_priority; 3122 3123 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3124 3125 old_priority = pinfo->priority; 3126 3127 /* 3128 * Are we already queued? 3129 */ 3130 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3131 /* Simply reorder based on new priority */ 3132 if (new_priority < old_priority) { 3133 camq_change_priority(queue, pinfo->index, 3134 new_priority); 3135 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3136 ("changed priority to %d\n", 3137 new_priority)); 3138 retval = 1; 3139 } else 3140 retval = 0; 3141 } else { 3142 /* New entry on the queue */ 3143 if (new_priority < old_priority) 3144 pinfo->priority = new_priority; 3145 3146 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3147 ("Inserting onto queue\n")); 3148 pinfo->generation = ++queue->generation; 3149 camq_insert(queue, pinfo); 3150 retval = 1; 3151 } 3152 return (retval); 3153 } 3154 3155 static void 3156 xpt_run_allocq_task(void *context, int pending) 3157 { 3158 struct cam_periph *periph = context; 3159 3160 cam_periph_lock(periph); 3161 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3162 xpt_run_allocq(periph, 1); 3163 cam_periph_unlock(periph); 3164 cam_periph_release(periph); 3165 } 3166 3167 static void 3168 xpt_run_allocq(struct cam_periph *periph, int sleep) 3169 { 3170 struct cam_ed *device; 3171 union ccb *ccb; 3172 uint32_t prio; 3173 3174 cam_periph_assert(periph, MA_OWNED); 3175 if (periph->periph_allocating) 3176 return; 3177 periph->periph_allocating = 1; 3178 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3179 device = periph->path->device; 3180 ccb = NULL; 3181 restart: 3182 while ((prio = min(periph->scheduled_priority, 3183 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3184 (periph->periph_allocated - (ccb != NULL ? 
1 : 0) < 3185 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3186 3187 if (ccb == NULL && 3188 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3189 if (sleep) { 3190 ccb = xpt_get_ccb(periph); 3191 goto restart; 3192 } 3193 if (periph->flags & CAM_PERIPH_RUN_TASK) 3194 break; 3195 cam_periph_doacquire(periph); 3196 periph->flags |= CAM_PERIPH_RUN_TASK; 3197 taskqueue_enqueue(xsoftc.xpt_taskq, 3198 &periph->periph_run_task); 3199 break; 3200 } 3201 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3202 if (prio == periph->immediate_priority) { 3203 periph->immediate_priority = CAM_PRIORITY_NONE; 3204 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3205 ("waking cam_periph_getccb()\n")); 3206 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3207 periph_links.sle); 3208 wakeup(&periph->ccb_list); 3209 } else { 3210 periph->scheduled_priority = CAM_PRIORITY_NONE; 3211 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3212 ("calling periph_start()\n")); 3213 periph->periph_start(periph, ccb); 3214 } 3215 ccb = NULL; 3216 } 3217 if (ccb != NULL) 3218 xpt_release_ccb(ccb); 3219 periph->periph_allocating = 0; 3220 } 3221 3222 static void 3223 xpt_run_devq(struct cam_devq *devq) 3224 { 3225 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1]; 3226 int lock; 3227 3228 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3229 3230 devq->send_queue.qfrozen_cnt++; 3231 while ((devq->send_queue.entries > 0) 3232 && (devq->send_openings > 0) 3233 && (devq->send_queue.qfrozen_cnt <= 1)) { 3234 struct cam_ed *device; 3235 union ccb *work_ccb; 3236 struct cam_sim *sim; 3237 3238 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3239 CAMQ_HEAD); 3240 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3241 ("running device %p\n", device)); 3242 3243 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3244 if (work_ccb == NULL) { 3245 printf("device on run queue with no ccbs???\n"); 3246 continue; 3247 } 3248 3249 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3250 3251 mtx_lock(&xsoftc.xpt_highpower_lock); 3252 if (xsoftc.num_highpower <= 0) { 3253 /* 3254 * We got a high power command, but we 3255 * don't have any available slots. Freeze 3256 * the device queue until we have a slot 3257 * available. 3258 */ 3259 xpt_freeze_devq_device(device, 1); 3260 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3261 highpowerq_entry); 3262 3263 mtx_unlock(&xsoftc.xpt_highpower_lock); 3264 continue; 3265 } else { 3266 /* 3267 * Consume a high power slot while 3268 * this ccb runs. 3269 */ 3270 xsoftc.num_highpower--; 3271 } 3272 mtx_unlock(&xsoftc.xpt_highpower_lock); 3273 } 3274 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3275 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3276 devq->send_openings--; 3277 devq->send_active++; 3278 xpt_schedule_devq(devq, device); 3279 mtx_unlock(&devq->send_mtx); 3280 3281 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3282 /* 3283 * The client wants to freeze the queue 3284 * after this CCB is sent. 3285 */ 3286 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3287 } 3288 3289 /* In Target mode, the peripheral driver knows best... */ 3290 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3291 if ((device->inq_flags & SID_CmdQue) != 0 3292 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3293 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3294 else 3295 /* 3296 * Clear this in case of a retried CCB that 3297 * failed due to a rejected tag. 
3298 */ 3299 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3300 } 3301 3302 switch (work_ccb->ccb_h.func_code) { 3303 case XPT_SCSI_IO: 3304 CAM_DEBUG(work_ccb->ccb_h.path, 3305 CAM_DEBUG_CDB,("%s. CDB: %s\n", 3306 scsi_op_desc(work_ccb->csio.cdb_io.cdb_bytes[0], 3307 &device->inq_data), 3308 scsi_cdb_string(work_ccb->csio.cdb_io.cdb_bytes, 3309 cdb_str, sizeof(cdb_str)))); 3310 break; 3311 case XPT_ATA_IO: 3312 CAM_DEBUG(work_ccb->ccb_h.path, 3313 CAM_DEBUG_CDB,("%s. ACB: %s\n", 3314 ata_op_string(&work_ccb->ataio.cmd), 3315 ata_cmd_string(&work_ccb->ataio.cmd, 3316 cdb_str, sizeof(cdb_str)))); 3317 break; 3318 default: 3319 break; 3320 } 3321 3322 /* 3323 * Device queues can be shared among multiple SIM instances 3324 * that reside on different busses. Use the SIM from the 3325 * queued device, rather than the one from the calling bus. 3326 */ 3327 sim = device->sim; 3328 lock = (mtx_owned(sim->mtx) == 0); 3329 if (lock) 3330 CAM_SIM_LOCK(sim); 3331 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // xxx uintprt_t too small 32bit platforms 3332 (*(sim->sim_action))(sim, work_ccb); 3333 if (lock) 3334 CAM_SIM_UNLOCK(sim); 3335 mtx_lock(&devq->send_mtx); 3336 } 3337 devq->send_queue.qfrozen_cnt--; 3338 } 3339 3340 /* 3341 * This function merges stuff from the slave ccb into the master ccb, while 3342 * keeping important fields in the master ccb constant. 3343 */ 3344 void 3345 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3346 { 3347 3348 /* 3349 * Pull fields that are valid for peripheral drivers to set 3350 * into the master CCB along with the CCB "payload". 3351 */ 3352 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3353 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3354 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3355 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3356 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3357 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3358 } 3359 3360 void 3361 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3362 u_int32_t priority, u_int32_t flags) 3363 { 3364 3365 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3366 ccb_h->pinfo.priority = priority; 3367 ccb_h->path = path; 3368 ccb_h->path_id = path->bus->path_id; 3369 if (path->target) 3370 ccb_h->target_id = path->target->target_id; 3371 else 3372 ccb_h->target_id = CAM_TARGET_WILDCARD; 3373 if (path->device) { 3374 ccb_h->target_lun = path->device->lun_id; 3375 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3376 } else { 3377 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3378 } 3379 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3380 ccb_h->flags = flags; 3381 ccb_h->xflags = 0; 3382 } 3383 3384 void 3385 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3386 { 3387 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3388 } 3389 3390 /* Path manipulation functions */ 3391 cam_status 3392 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3393 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3394 { 3395 struct cam_path *path; 3396 cam_status status; 3397 3398 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3399 3400 if (path == NULL) { 3401 status = CAM_RESRC_UNAVAIL; 3402 return(status); 3403 } 3404 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3405 if (status != CAM_REQ_CMP) { 3406 free(path, M_CAMPATH); 3407 path = NULL; 3408 } 3409 *new_path_ptr = path; 3410 return (status); 3411 } 3412 3413 
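/*
 * Typical use of xpt_create_path()/xpt_free_path() (an illustrative
 * sketch, not code from this file): create a wildcard path for a SIM,
 * use it for a CCB or an async notification, then free it.
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, sim->path_id,
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
 *		return;
 *	xpt_async(AC_BUS_RESET, path, NULL);
 *	xpt_free_path(path);
 */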
cam_status 3414 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3415 struct cam_periph *periph, path_id_t path_id, 3416 target_id_t target_id, lun_id_t lun_id) 3417 { 3418 3419 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3420 lun_id)); 3421 } 3422 3423 cam_status 3424 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3425 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3426 { 3427 struct cam_eb *bus; 3428 struct cam_et *target; 3429 struct cam_ed *device; 3430 cam_status status; 3431 3432 status = CAM_REQ_CMP; /* Completed without error */ 3433 target = NULL; /* Wildcarded */ 3434 device = NULL; /* Wildcarded */ 3435 3436 /* 3437 * We will potentially modify the EDT, so block interrupts 3438 * that may attempt to create cam paths. 3439 */ 3440 bus = xpt_find_bus(path_id); 3441 if (bus == NULL) { 3442 status = CAM_PATH_INVALID; 3443 } else { 3444 xpt_lock_buses(); 3445 mtx_lock(&bus->eb_mtx); 3446 target = xpt_find_target(bus, target_id); 3447 if (target == NULL) { 3448 /* Create one */ 3449 struct cam_et *new_target; 3450 3451 new_target = xpt_alloc_target(bus, target_id); 3452 if (new_target == NULL) { 3453 status = CAM_RESRC_UNAVAIL; 3454 } else { 3455 target = new_target; 3456 } 3457 } 3458 xpt_unlock_buses(); 3459 if (target != NULL) { 3460 device = xpt_find_device(target, lun_id); 3461 if (device == NULL) { 3462 /* Create one */ 3463 struct cam_ed *new_device; 3464 3465 new_device = 3466 (*(bus->xport->alloc_device))(bus, 3467 target, 3468 lun_id); 3469 if (new_device == NULL) { 3470 status = CAM_RESRC_UNAVAIL; 3471 } else { 3472 device = new_device; 3473 } 3474 } 3475 } 3476 mtx_unlock(&bus->eb_mtx); 3477 } 3478 3479 /* 3480 * Only touch the user's data if we are successful. 3481 */ 3482 if (status == CAM_REQ_CMP) { 3483 new_path->periph = perph; 3484 new_path->bus = bus; 3485 new_path->target = target; 3486 new_path->device = device; 3487 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3488 } else { 3489 if (device != NULL) 3490 xpt_release_device(device); 3491 if (target != NULL) 3492 xpt_release_target(target); 3493 if (bus != NULL) 3494 xpt_release_bus(bus); 3495 } 3496 return (status); 3497 } 3498 3499 cam_status 3500 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3501 { 3502 struct cam_path *new_path; 3503 3504 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3505 if (new_path == NULL) 3506 return(CAM_RESRC_UNAVAIL); 3507 xpt_copy_path(new_path, path); 3508 *new_path_ptr = new_path; 3509 return (CAM_REQ_CMP); 3510 } 3511 3512 void 3513 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3514 { 3515 3516 *new_path = *path; 3517 if (path->bus != NULL) 3518 xpt_acquire_bus(path->bus); 3519 if (path->target != NULL) 3520 xpt_acquire_target(path->target); 3521 if (path->device != NULL) 3522 xpt_acquire_device(path->device); 3523 } 3524 3525 void 3526 xpt_release_path(struct cam_path *path) 3527 { 3528 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3529 if (path->device != NULL) { 3530 xpt_release_device(path->device); 3531 path->device = NULL; 3532 } 3533 if (path->target != NULL) { 3534 xpt_release_target(path->target); 3535 path->target = NULL; 3536 } 3537 if (path->bus != NULL) { 3538 xpt_release_bus(path->bus); 3539 path->bus = NULL; 3540 } 3541 } 3542 3543 void 3544 xpt_free_path(struct cam_path *path) 3545 { 3546 3547 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3548 xpt_release_path(path); 3549 free(path, M_CAMPATH); 3550 
} 3551 3552 void 3553 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3554 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3555 { 3556 3557 xpt_lock_buses(); 3558 if (bus_ref) { 3559 if (path->bus) 3560 *bus_ref = path->bus->refcount; 3561 else 3562 *bus_ref = 0; 3563 } 3564 if (periph_ref) { 3565 if (path->periph) 3566 *periph_ref = path->periph->refcount; 3567 else 3568 *periph_ref = 0; 3569 } 3570 xpt_unlock_buses(); 3571 if (target_ref) { 3572 if (path->target) 3573 *target_ref = path->target->refcount; 3574 else 3575 *target_ref = 0; 3576 } 3577 if (device_ref) { 3578 if (path->device) 3579 *device_ref = path->device->refcount; 3580 else 3581 *device_ref = 0; 3582 } 3583 } 3584 3585 /* 3586 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3587 * in path1, 2 for match with wildcards in path2. 3588 */ 3589 int 3590 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3591 { 3592 int retval = 0; 3593 3594 if (path1->bus != path2->bus) { 3595 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3596 retval = 1; 3597 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3598 retval = 2; 3599 else 3600 return (-1); 3601 } 3602 if (path1->target != path2->target) { 3603 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3604 if (retval == 0) 3605 retval = 1; 3606 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3607 retval = 2; 3608 else 3609 return (-1); 3610 } 3611 if (path1->device != path2->device) { 3612 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3613 if (retval == 0) 3614 retval = 1; 3615 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3616 retval = 2; 3617 else 3618 return (-1); 3619 } 3620 return (retval); 3621 } 3622 3623 int 3624 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3625 { 3626 int retval = 0; 3627 3628 if (path->bus != dev->target->bus) { 3629 if (path->bus->path_id == CAM_BUS_WILDCARD) 3630 retval = 1; 3631 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3632 retval = 2; 3633 else 3634 return (-1); 3635 } 3636 if (path->target != dev->target) { 3637 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3638 if (retval == 0) 3639 retval = 1; 3640 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3641 retval = 2; 3642 else 3643 return (-1); 3644 } 3645 if (path->device != dev) { 3646 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3647 if (retval == 0) 3648 retval = 1; 3649 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3650 retval = 2; 3651 else 3652 return (-1); 3653 } 3654 return (retval); 3655 } 3656 3657 void 3658 xpt_print_path(struct cam_path *path) 3659 { 3660 3661 if (path == NULL) 3662 printf("(nopath): "); 3663 else { 3664 if (path->periph != NULL) 3665 printf("(%s%d:", path->periph->periph_name, 3666 path->periph->unit_number); 3667 else 3668 printf("(noperiph:"); 3669 3670 if (path->bus != NULL) 3671 printf("%s%d:%d:", path->bus->sim->sim_name, 3672 path->bus->sim->unit_number, 3673 path->bus->sim->bus_id); 3674 else 3675 printf("nobus:"); 3676 3677 if (path->target != NULL) 3678 printf("%d:", path->target->target_id); 3679 else 3680 printf("X:"); 3681 3682 if (path->device != NULL) 3683 printf("%jx): ", (uintmax_t)path->device->lun_id); 3684 else 3685 printf("X): "); 3686 } 3687 } 3688 3689 void 3690 xpt_print_device(struct cam_ed *device) 3691 { 3692 3693 if (device == NULL) 3694 printf("(nopath): "); 3695 else { 3696 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3697 device->sim->unit_number, 3698 device->sim->bus_id, 3699 
device->target->target_id,
3700 (uintmax_t)device->lun_id);
3701 }
3702 }
3703
3704 void
3705 xpt_print(struct cam_path *path, const char *fmt, ...)
3706 {
3707 va_list ap;
3708 xpt_print_path(path);
3709 va_start(ap, fmt);
3710 vprintf(fmt, ap);
3711 va_end(ap);
3712 }
3713
3714 int
3715 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3716 {
3717 struct sbuf sb;
3718
3719 sbuf_new(&sb, str, str_len, 0);
3720
3721 if (path == NULL)
3722 sbuf_printf(&sb, "(nopath): ");
3723 else {
3724 if (path->periph != NULL)
3725 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3726 path->periph->unit_number);
3727 else
3728 sbuf_printf(&sb, "(noperiph:");
3729
3730 if (path->bus != NULL)
3731 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3732 path->bus->sim->unit_number,
3733 path->bus->sim->bus_id);
3734 else
3735 sbuf_printf(&sb, "nobus:");
3736
3737 if (path->target != NULL)
3738 sbuf_printf(&sb, "%d:", path->target->target_id);
3739 else
3740 sbuf_printf(&sb, "X:");
3741
3742 if (path->device != NULL)
3743 sbuf_printf(&sb, "%jx): ",
3744 (uintmax_t)path->device->lun_id);
3745 else
3746 sbuf_printf(&sb, "X): ");
3747 }
3748 sbuf_finish(&sb);
3749
3750 return(sbuf_len(&sb));
3751 }
3752
3753 path_id_t
3754 xpt_path_path_id(struct cam_path *path)
3755 {
3756 return(path->bus->path_id);
3757 }
3758
3759 target_id_t
3760 xpt_path_target_id(struct cam_path *path)
3761 {
3762 if (path->target != NULL)
3763 return (path->target->target_id);
3764 else
3765 return (CAM_TARGET_WILDCARD);
3766 }
3767
3768 lun_id_t
3769 xpt_path_lun_id(struct cam_path *path)
3770 {
3771 if (path->device != NULL)
3772 return (path->device->lun_id);
3773 else
3774 return (CAM_LUN_WILDCARD);
3775 }
3776
3777 struct cam_sim *
3778 xpt_path_sim(struct cam_path *path)
3779 {
3780
3781 return (path->bus->sim);
3782 }
3783
3784 struct cam_periph*
3785 xpt_path_periph(struct cam_path *path)
3786 {
3787
3788 return (path->periph);
3789 }
3790
3791 /*
3792 * Release a CAM control block for the caller. Remit the cost of the structure
3793 * to the device referenced by the path. If this device had no 'credits'
3794 * and peripheral drivers have registered async callbacks for this notification,
3795 * call them now.
3796 */
3797 void
3798 xpt_release_ccb(union ccb *free_ccb)
3799 {
3800 struct cam_ed *device;
3801 struct cam_periph *periph;
3802
3803 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3804 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3805 device = free_ccb->ccb_h.path->device;
3806 periph = free_ccb->ccb_h.path->periph;
3807
3808 xpt_free_ccb(free_ccb);
3809 periph->periph_allocated--;
3810 cam_ccbq_release_opening(&device->ccbq);
3811 xpt_run_allocq(periph, 0);
3812 }
3813
3814 /* Functions accessed by SIM drivers */
3815
3816 static struct xpt_xport xport_default = {
3817 .alloc_device = xpt_alloc_device_default,
3818 .action = xpt_action_default,
3819 .async = xpt_dev_async_default,
3820 };
3821
3822 /*
3823 * A sim structure, listing the SIM entry points and instance
3824 * identification info, is passed to xpt_bus_register to hook the SIM
3825 * into the CAM framework. xpt_bus_register creates a cam_eb entry
3826 * for this new bus and places it in the array of busses and assigns
3827 * it a path_id. The path_id may be influenced by "hard wiring"
3828 * information specified by the user. Once interrupt services are
3829 * available, the bus will be probed.
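 *
 * A minimal sketch of the attach-side sequence in a SIM driver (names
 * such as foo_action, foo_poll and the softc layout are hypothetical):
 *
 *	devq = cam_simq_alloc(MAX_TRANS);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, MAX_TRANS, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		goto fail;
 *	mtx_unlock(&softc->mtx);
 *
 * Note that xpt_bus_register() asserts that the SIM lock is held by the
 * caller, which is why the softc mutex is taken around it above.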
3830 */ 3831 int32_t 3832 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3833 { 3834 struct cam_eb *new_bus; 3835 struct cam_eb *old_bus; 3836 struct ccb_pathinq cpi; 3837 struct cam_path *path; 3838 cam_status status; 3839 3840 mtx_assert(sim->mtx, MA_OWNED); 3841 3842 sim->bus_id = bus; 3843 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3844 M_CAMXPT, M_NOWAIT|M_ZERO); 3845 if (new_bus == NULL) { 3846 /* Couldn't satisfy request */ 3847 return (CAM_RESRC_UNAVAIL); 3848 } 3849 3850 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3851 TAILQ_INIT(&new_bus->et_entries); 3852 cam_sim_hold(sim); 3853 new_bus->sim = sim; 3854 timevalclear(&new_bus->last_reset); 3855 new_bus->flags = 0; 3856 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3857 new_bus->generation = 0; 3858 3859 xpt_lock_buses(); 3860 sim->path_id = new_bus->path_id = 3861 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3862 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3863 while (old_bus != NULL 3864 && old_bus->path_id < new_bus->path_id) 3865 old_bus = TAILQ_NEXT(old_bus, links); 3866 if (old_bus != NULL) 3867 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3868 else 3869 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3870 xsoftc.bus_generation++; 3871 xpt_unlock_buses(); 3872 3873 /* 3874 * Set a default transport so that a PATH_INQ can be issued to 3875 * the SIM. This will then allow for probing and attaching of 3876 * a more appropriate transport. 3877 */ 3878 new_bus->xport = &xport_default; 3879 3880 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3881 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3882 if (status != CAM_REQ_CMP) { 3883 xpt_release_bus(new_bus); 3884 free(path, M_CAMXPT); 3885 return (CAM_RESRC_UNAVAIL); 3886 } 3887 3888 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3889 cpi.ccb_h.func_code = XPT_PATH_INQ; 3890 xpt_action((union ccb *)&cpi); 3891 3892 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3893 switch (cpi.transport) { 3894 case XPORT_SPI: 3895 case XPORT_SAS: 3896 case XPORT_FC: 3897 case XPORT_USB: 3898 case XPORT_ISCSI: 3899 case XPORT_SRP: 3900 case XPORT_PPB: 3901 new_bus->xport = scsi_get_xport(); 3902 break; 3903 case XPORT_ATA: 3904 case XPORT_SATA: 3905 new_bus->xport = ata_get_xport(); 3906 break; 3907 default: 3908 new_bus->xport = &xport_default; 3909 break; 3910 } 3911 } 3912 3913 /* Notify interested parties */ 3914 if (sim->path_id != CAM_XPT_PATH_ID) { 3915 3916 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3917 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3918 union ccb *scan_ccb; 3919 3920 /* Initiate bus rescan. */ 3921 scan_ccb = xpt_alloc_ccb_nowait(); 3922 if (scan_ccb != NULL) { 3923 scan_ccb->ccb_h.path = path; 3924 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3925 scan_ccb->crcn.flags = 0; 3926 xpt_rescan(scan_ccb); 3927 } else { 3928 xpt_print(path, 3929 "Can't allocate CCB to scan bus\n"); 3930 xpt_free_path(path); 3931 } 3932 } else 3933 xpt_free_path(path); 3934 } else 3935 xpt_free_path(path); 3936 return (CAM_SUCCESS); 3937 } 3938 3939 int32_t 3940 xpt_bus_deregister(path_id_t pathid) 3941 { 3942 struct cam_path bus_path; 3943 cam_status status; 3944 3945 status = xpt_compile_path(&bus_path, NULL, pathid, 3946 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3947 if (status != CAM_REQ_CMP) 3948 return (status); 3949 3950 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3951 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3952 3953 /* Release the reference count held while registered. 
*/ 3954 xpt_release_bus(bus_path.bus); 3955 xpt_release_path(&bus_path); 3956 3957 return (CAM_REQ_CMP); 3958 } 3959 3960 static path_id_t 3961 xptnextfreepathid(void) 3962 { 3963 struct cam_eb *bus; 3964 path_id_t pathid; 3965 const char *strval; 3966 3967 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 3968 pathid = 0; 3969 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3970 retry: 3971 /* Find an unoccupied pathid */ 3972 while (bus != NULL && bus->path_id <= pathid) { 3973 if (bus->path_id == pathid) 3974 pathid++; 3975 bus = TAILQ_NEXT(bus, links); 3976 } 3977 3978 /* 3979 * Ensure that this pathid is not reserved for 3980 * a bus that may be registered in the future. 3981 */ 3982 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 3983 ++pathid; 3984 /* Start the search over */ 3985 goto retry; 3986 } 3987 return (pathid); 3988 } 3989 3990 static path_id_t 3991 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 3992 { 3993 path_id_t pathid; 3994 int i, dunit, val; 3995 char buf[32]; 3996 const char *dname; 3997 3998 pathid = CAM_XPT_PATH_ID; 3999 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4000 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4001 return (pathid); 4002 i = 0; 4003 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4004 if (strcmp(dname, "scbus")) { 4005 /* Avoid a bit of foot shooting. */ 4006 continue; 4007 } 4008 if (dunit < 0) /* unwired?! */ 4009 continue; 4010 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4011 if (sim_bus == val) { 4012 pathid = dunit; 4013 break; 4014 } 4015 } else if (sim_bus == 0) { 4016 /* Unspecified matches bus 0 */ 4017 pathid = dunit; 4018 break; 4019 } else { 4020 printf("Ambiguous scbus configuration for %s%d " 4021 "bus %d, cannot wire down. The kernel " 4022 "config entry for scbus%d should " 4023 "specify a controller bus.\n" 4024 "Scbus will be assigned dynamically.\n", 4025 sim_name, sim_unit, sim_bus, dunit); 4026 break; 4027 } 4028 } 4029 4030 if (pathid == CAM_XPT_PATH_ID) 4031 pathid = xptnextfreepathid(); 4032 return (pathid); 4033 } 4034 4035 static const char * 4036 xpt_async_string(u_int32_t async_code) 4037 { 4038 4039 switch (async_code) { 4040 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4041 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4042 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4043 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4044 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4045 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4046 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4047 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4048 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4049 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4050 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4051 case AC_CONTRACT: return ("AC_CONTRACT"); 4052 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4053 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4054 } 4055 return ("AC_UNKNOWN"); 4056 } 4057 4058 static int 4059 xpt_async_size(u_int32_t async_code) 4060 { 4061 4062 switch (async_code) { 4063 case AC_BUS_RESET: return (0); 4064 case AC_UNSOL_RESEL: return (0); 4065 case AC_SCSI_AEN: return (0); 4066 case AC_SENT_BDR: return (0); 4067 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4068 case AC_PATH_DEREGISTERED: return (0); 4069 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4070 case AC_LOST_DEVICE: return (0); 4071 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4072 case 
AC_INQ_CHANGED: return (0); 4073 case AC_GETDEV_CHANGED: return (0); 4074 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4075 case AC_ADVINFO_CHANGED: return (-1); 4076 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4077 } 4078 return (0); 4079 } 4080 4081 static int 4082 xpt_async_process_dev(struct cam_ed *device, void *arg) 4083 { 4084 union ccb *ccb = arg; 4085 struct cam_path *path = ccb->ccb_h.path; 4086 void *async_arg = ccb->casync.async_arg_ptr; 4087 u_int32_t async_code = ccb->casync.async_code; 4088 int relock; 4089 4090 if (path->device != device 4091 && path->device->lun_id != CAM_LUN_WILDCARD 4092 && device->lun_id != CAM_LUN_WILDCARD) 4093 return (1); 4094 4095 /* 4096 * The async callback could free the device. 4097 * If it is a broadcast async, it doesn't hold 4098 * device reference, so take our own reference. 4099 */ 4100 xpt_acquire_device(device); 4101 4102 /* 4103 * If async for specific device is to be delivered to 4104 * the wildcard client, take the specific device lock. 4105 * XXX: We may need a way for client to specify it. 4106 */ 4107 if ((device->lun_id == CAM_LUN_WILDCARD && 4108 path->device->lun_id != CAM_LUN_WILDCARD) || 4109 (device->target->target_id == CAM_TARGET_WILDCARD && 4110 path->target->target_id != CAM_TARGET_WILDCARD) || 4111 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4112 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4113 mtx_unlock(&device->device_mtx); 4114 xpt_path_lock(path); 4115 relock = 1; 4116 } else 4117 relock = 0; 4118 4119 (*(device->target->bus->xport->async))(async_code, 4120 device->target->bus, device->target, device, async_arg); 4121 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4122 4123 if (relock) { 4124 xpt_path_unlock(path); 4125 mtx_lock(&device->device_mtx); 4126 } 4127 xpt_release_device(device); 4128 return (1); 4129 } 4130 4131 static int 4132 xpt_async_process_tgt(struct cam_et *target, void *arg) 4133 { 4134 union ccb *ccb = arg; 4135 struct cam_path *path = ccb->ccb_h.path; 4136 4137 if (path->target != target 4138 && path->target->target_id != CAM_TARGET_WILDCARD 4139 && target->target_id != CAM_TARGET_WILDCARD) 4140 return (1); 4141 4142 if (ccb->casync.async_code == AC_SENT_BDR) { 4143 /* Update our notion of when the last reset occurred */ 4144 microtime(&target->last_reset); 4145 } 4146 4147 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4148 } 4149 4150 static void 4151 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4152 { 4153 struct cam_eb *bus; 4154 struct cam_path *path; 4155 void *async_arg; 4156 u_int32_t async_code; 4157 4158 path = ccb->ccb_h.path; 4159 async_code = ccb->casync.async_code; 4160 async_arg = ccb->casync.async_arg_ptr; 4161 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4162 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4163 bus = path->bus; 4164 4165 if (async_code == AC_BUS_RESET) { 4166 /* Update our notion of when the last reset occurred */ 4167 microtime(&bus->last_reset); 4168 } 4169 4170 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4171 4172 /* 4173 * If this wasn't a fully wildcarded async, tell all 4174 * clients that want all async events. 
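 * (i.e. clients that registered their callbacks against the xpt_periph
 * wildcard path handled below).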
4175 */ 4176 if (bus != xpt_periph->path->bus) { 4177 xpt_path_lock(xpt_periph->path); 4178 xpt_async_process_dev(xpt_periph->path->device, ccb); 4179 xpt_path_unlock(xpt_periph->path); 4180 } 4181 4182 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4183 xpt_release_devq(path, 1, TRUE); 4184 else 4185 xpt_release_simq(path->bus->sim, TRUE); 4186 if (ccb->casync.async_arg_size > 0) 4187 free(async_arg, M_CAMXPT); 4188 xpt_free_path(path); 4189 xpt_free_ccb(ccb); 4190 } 4191 4192 static void 4193 xpt_async_bcast(struct async_list *async_head, 4194 u_int32_t async_code, 4195 struct cam_path *path, void *async_arg) 4196 { 4197 struct async_node *cur_entry; 4198 int lock; 4199 4200 cur_entry = SLIST_FIRST(async_head); 4201 while (cur_entry != NULL) { 4202 struct async_node *next_entry; 4203 /* 4204 * Grab the next list entry before we call the current 4205 * entry's callback. This is because the callback function 4206 * can delete its async callback entry. 4207 */ 4208 next_entry = SLIST_NEXT(cur_entry, links); 4209 if ((cur_entry->event_enable & async_code) != 0) { 4210 lock = cur_entry->event_lock; 4211 if (lock) 4212 CAM_SIM_LOCK(path->device->sim); 4213 cur_entry->callback(cur_entry->callback_arg, 4214 async_code, path, 4215 async_arg); 4216 if (lock) 4217 CAM_SIM_UNLOCK(path->device->sim); 4218 } 4219 cur_entry = next_entry; 4220 } 4221 } 4222 4223 void 4224 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4225 { 4226 union ccb *ccb; 4227 int size; 4228 4229 ccb = xpt_alloc_ccb_nowait(); 4230 if (ccb == NULL) { 4231 xpt_print(path, "Can't allocate CCB to send %s\n", 4232 xpt_async_string(async_code)); 4233 return; 4234 } 4235 4236 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4237 xpt_print(path, "Can't allocate path to send %s\n", 4238 xpt_async_string(async_code)); 4239 xpt_free_ccb(ccb); 4240 return; 4241 } 4242 ccb->ccb_h.path->periph = NULL; 4243 ccb->ccb_h.func_code = XPT_ASYNC; 4244 ccb->ccb_h.cbfcnp = xpt_async_process; 4245 ccb->ccb_h.flags |= CAM_UNLOCKED; 4246 ccb->casync.async_code = async_code; 4247 ccb->casync.async_arg_size = 0; 4248 size = xpt_async_size(async_code); 4249 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4250 ("xpt_async: func %#x %s aync_code %d %s\n", 4251 ccb->ccb_h.func_code, 4252 xpt_action_name(ccb->ccb_h.func_code), 4253 async_code, 4254 xpt_async_string(async_code))); 4255 if (size > 0 && async_arg != NULL) { 4256 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4257 if (ccb->casync.async_arg_ptr == NULL) { 4258 xpt_print(path, "Can't allocate argument to send %s\n", 4259 xpt_async_string(async_code)); 4260 xpt_free_path(ccb->ccb_h.path); 4261 xpt_free_ccb(ccb); 4262 return; 4263 } 4264 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4265 ccb->casync.async_arg_size = size; 4266 } else if (size < 0) { 4267 ccb->casync.async_arg_ptr = async_arg; 4268 ccb->casync.async_arg_size = size; 4269 } 4270 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4271 xpt_freeze_devq(path, 1); 4272 else 4273 xpt_freeze_simq(path->bus->sim, 1); 4274 xpt_done(ccb); 4275 } 4276 4277 static void 4278 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4279 struct cam_et *target, struct cam_ed *device, 4280 void *async_arg) 4281 { 4282 4283 /* 4284 * We only need to handle events for real devices. 
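 * Such events reach this default handler via xpt_async() above; a SIM
 * that detected a bus reset, for example, might post one roughly as
 * follows (path here being the wildcard path the SIM holds for its bus):
 *
 *	xpt_async(AC_BUS_RESET, path, NULL);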
4285 */ 4286 if (target->target_id == CAM_TARGET_WILDCARD 4287 || device->lun_id == CAM_LUN_WILDCARD) 4288 return; 4289 4290 printf("%s called\n", __func__); 4291 } 4292 4293 static uint32_t 4294 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4295 { 4296 struct cam_devq *devq; 4297 uint32_t freeze; 4298 4299 devq = dev->sim->devq; 4300 mtx_assert(&devq->send_mtx, MA_OWNED); 4301 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4302 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4303 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4304 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4305 /* Remove frozen device from sendq. */ 4306 if (device_is_queued(dev)) 4307 camq_remove(&devq->send_queue, dev->devq_entry.index); 4308 return (freeze); 4309 } 4310 4311 u_int32_t 4312 xpt_freeze_devq(struct cam_path *path, u_int count) 4313 { 4314 struct cam_ed *dev = path->device; 4315 struct cam_devq *devq; 4316 uint32_t freeze; 4317 4318 devq = dev->sim->devq; 4319 mtx_lock(&devq->send_mtx); 4320 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4321 freeze = xpt_freeze_devq_device(dev, count); 4322 mtx_unlock(&devq->send_mtx); 4323 return (freeze); 4324 } 4325 4326 u_int32_t 4327 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4328 { 4329 struct cam_devq *devq; 4330 uint32_t freeze; 4331 4332 devq = sim->devq; 4333 mtx_lock(&devq->send_mtx); 4334 freeze = (devq->send_queue.qfrozen_cnt += count); 4335 mtx_unlock(&devq->send_mtx); 4336 return (freeze); 4337 } 4338 4339 static void 4340 xpt_release_devq_timeout(void *arg) 4341 { 4342 struct cam_ed *dev; 4343 struct cam_devq *devq; 4344 4345 dev = (struct cam_ed *)arg; 4346 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4347 devq = dev->sim->devq; 4348 mtx_assert(&devq->send_mtx, MA_OWNED); 4349 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4350 xpt_run_devq(devq); 4351 } 4352 4353 void 4354 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4355 { 4356 struct cam_ed *dev; 4357 struct cam_devq *devq; 4358 4359 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4360 count, run_queue)); 4361 dev = path->device; 4362 devq = dev->sim->devq; 4363 mtx_lock(&devq->send_mtx); 4364 if (xpt_release_devq_device(dev, count, run_queue)) 4365 xpt_run_devq(dev->sim->devq); 4366 mtx_unlock(&devq->send_mtx); 4367 } 4368 4369 static int 4370 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4371 { 4372 4373 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4374 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4375 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4376 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4377 if (count > dev->ccbq.queue.qfrozen_cnt) { 4378 #ifdef INVARIANTS 4379 printf("xpt_release_devq(): requested %u > present %u\n", 4380 count, dev->ccbq.queue.qfrozen_cnt); 4381 #endif 4382 count = dev->ccbq.queue.qfrozen_cnt; 4383 } 4384 dev->ccbq.queue.qfrozen_cnt -= count; 4385 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4386 /* 4387 * No longer need to wait for a successful 4388 * command completion. 4389 */ 4390 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4391 /* 4392 * Remove any timeouts that might be scheduled 4393 * to release this queue. 4394 */ 4395 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4396 callout_stop(&dev->callout); 4397 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4398 } 4399 /* 4400 * Now that we are unfrozen schedule the 4401 * device so any pending transactions are 4402 * run. 
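 *
 * For reference, code outside the XPT drives this through the public
 * pairing defined above, roughly:
 *
 *	xpt_freeze_devq(path, 1);
 *	... recovery work that must keep the device queue frozen ...
 *	xpt_release_devq(path, 1, TRUE);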
4403 */ 4404 xpt_schedule_devq(dev->sim->devq, dev); 4405 } else 4406 run_queue = 0; 4407 return (run_queue); 4408 } 4409 4410 void 4411 xpt_release_simq(struct cam_sim *sim, int run_queue) 4412 { 4413 struct cam_devq *devq; 4414 4415 devq = sim->devq; 4416 mtx_lock(&devq->send_mtx); 4417 if (devq->send_queue.qfrozen_cnt <= 0) { 4418 #ifdef INVARIANTS 4419 printf("xpt_release_simq: requested 1 > present %u\n", 4420 devq->send_queue.qfrozen_cnt); 4421 #endif 4422 } else 4423 devq->send_queue.qfrozen_cnt--; 4424 if (devq->send_queue.qfrozen_cnt == 0) { 4425 /* 4426 * If there is a timeout scheduled to release this 4427 * sim queue, remove it. The queue frozen count is 4428 * already at 0. 4429 */ 4430 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4431 callout_stop(&sim->callout); 4432 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4433 } 4434 if (run_queue) { 4435 /* 4436 * Now that we are unfrozen run the send queue. 4437 */ 4438 xpt_run_devq(sim->devq); 4439 } 4440 } 4441 mtx_unlock(&devq->send_mtx); 4442 } 4443 4444 /* 4445 * XXX Appears to be unused. 4446 */ 4447 static void 4448 xpt_release_simq_timeout(void *arg) 4449 { 4450 struct cam_sim *sim; 4451 4452 sim = (struct cam_sim *)arg; 4453 xpt_release_simq(sim, /* run_queue */ TRUE); 4454 } 4455 4456 void 4457 xpt_done(union ccb *done_ccb) 4458 { 4459 struct cam_doneq *queue; 4460 int run, hash; 4461 4462 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4463 ("xpt_done: func= %#x %s status %#x\n", 4464 done_ccb->ccb_h.func_code, 4465 xpt_action_name(done_ccb->ccb_h.func_code), 4466 done_ccb->ccb_h.status)); 4467 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4468 return; 4469 4470 /* Store the time the ccb was in the sim */ 4471 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4472 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4473 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4474 queue = &cam_doneqs[hash]; 4475 mtx_lock(&queue->cam_doneq_mtx); 4476 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4477 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4478 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4479 mtx_unlock(&queue->cam_doneq_mtx); 4480 if (run) 4481 wakeup(&queue->cam_doneq); 4482 } 4483 4484 void 4485 xpt_done_direct(union ccb *done_ccb) 4486 { 4487 4488 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4489 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4490 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4491 return; 4492 4493 /* Store the time the ccb was in the sim */ 4494 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4495 xpt_done_process(&done_ccb->ccb_h); 4496 } 4497 4498 union ccb * 4499 xpt_alloc_ccb() 4500 { 4501 union ccb *new_ccb; 4502 4503 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4504 return (new_ccb); 4505 } 4506 4507 union ccb * 4508 xpt_alloc_ccb_nowait() 4509 { 4510 union ccb *new_ccb; 4511 4512 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4513 return (new_ccb); 4514 } 4515 4516 void 4517 xpt_free_ccb(union ccb *free_ccb) 4518 { 4519 free(free_ccb, M_CAMCCB); 4520 } 4521 4522 4523 4524 /* Private XPT functions */ 4525 4526 /* 4527 * Get a CAM control block for the caller. Charge the structure to the device 4528 * referenced by the path. If we don't have sufficient resources to allocate 4529 * more ccbs, we return NULL. 
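 *
 * Peripheral drivers normally reach these allocators through
 * cam_periph_getccb() below and hand the CCB back with
 * xpt_release_ccb(); a rough sketch of that cycle, under the
 * periph lock, is:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in the request and submit it with xpt_action(ccb) ...
 *	xpt_release_ccb(ccb);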
4530 */ 4531 static union ccb * 4532 xpt_get_ccb_nowait(struct cam_periph *periph) 4533 { 4534 union ccb *new_ccb; 4535 4536 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4537 if (new_ccb == NULL) 4538 return (NULL); 4539 periph->periph_allocated++; 4540 cam_ccbq_take_opening(&periph->path->device->ccbq); 4541 return (new_ccb); 4542 } 4543 4544 static union ccb * 4545 xpt_get_ccb(struct cam_periph *periph) 4546 { 4547 union ccb *new_ccb; 4548 4549 cam_periph_unlock(periph); 4550 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4551 cam_periph_lock(periph); 4552 periph->periph_allocated++; 4553 cam_ccbq_take_opening(&periph->path->device->ccbq); 4554 return (new_ccb); 4555 } 4556 4557 union ccb * 4558 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4559 { 4560 struct ccb_hdr *ccb_h; 4561 4562 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4563 cam_periph_assert(periph, MA_OWNED); 4564 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4565 ccb_h->pinfo.priority != priority) { 4566 if (priority < periph->immediate_priority) { 4567 periph->immediate_priority = priority; 4568 xpt_run_allocq(periph, 0); 4569 } else 4570 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4571 "cgticb", 0); 4572 } 4573 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4574 return ((union ccb *)ccb_h); 4575 } 4576 4577 static void 4578 xpt_acquire_bus(struct cam_eb *bus) 4579 { 4580 4581 xpt_lock_buses(); 4582 bus->refcount++; 4583 xpt_unlock_buses(); 4584 } 4585 4586 static void 4587 xpt_release_bus(struct cam_eb *bus) 4588 { 4589 4590 xpt_lock_buses(); 4591 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4592 if (--bus->refcount > 0) { 4593 xpt_unlock_buses(); 4594 return; 4595 } 4596 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4597 xsoftc.bus_generation++; 4598 xpt_unlock_buses(); 4599 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4600 ("destroying bus, but target list is not empty")); 4601 cam_sim_release(bus->sim); 4602 mtx_destroy(&bus->eb_mtx); 4603 free(bus, M_CAMXPT); 4604 } 4605 4606 static struct cam_et * 4607 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4608 { 4609 struct cam_et *cur_target, *target; 4610 4611 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4612 mtx_assert(&bus->eb_mtx, MA_OWNED); 4613 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4614 M_NOWAIT|M_ZERO); 4615 if (target == NULL) 4616 return (NULL); 4617 4618 TAILQ_INIT(&target->ed_entries); 4619 target->bus = bus; 4620 target->target_id = target_id; 4621 target->refcount = 1; 4622 target->generation = 0; 4623 target->luns = NULL; 4624 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4625 timevalclear(&target->last_reset); 4626 /* 4627 * Hold a reference to our parent bus so it 4628 * will not go away before we do. 
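 * The bus reference taken here is dropped again in xpt_release_target()
 * once the last reference to this target goes away.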
4629 */ 4630 bus->refcount++; 4631 4632 /* Insertion sort into our bus's target list */ 4633 cur_target = TAILQ_FIRST(&bus->et_entries); 4634 while (cur_target != NULL && cur_target->target_id < target_id) 4635 cur_target = TAILQ_NEXT(cur_target, links); 4636 if (cur_target != NULL) { 4637 TAILQ_INSERT_BEFORE(cur_target, target, links); 4638 } else { 4639 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4640 } 4641 bus->generation++; 4642 return (target); 4643 } 4644 4645 static void 4646 xpt_acquire_target(struct cam_et *target) 4647 { 4648 struct cam_eb *bus = target->bus; 4649 4650 mtx_lock(&bus->eb_mtx); 4651 target->refcount++; 4652 mtx_unlock(&bus->eb_mtx); 4653 } 4654 4655 static void 4656 xpt_release_target(struct cam_et *target) 4657 { 4658 struct cam_eb *bus = target->bus; 4659 4660 mtx_lock(&bus->eb_mtx); 4661 if (--target->refcount > 0) { 4662 mtx_unlock(&bus->eb_mtx); 4663 return; 4664 } 4665 TAILQ_REMOVE(&bus->et_entries, target, links); 4666 bus->generation++; 4667 mtx_unlock(&bus->eb_mtx); 4668 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4669 ("destroying target, but device list is not empty")); 4670 xpt_release_bus(bus); 4671 mtx_destroy(&target->luns_mtx); 4672 if (target->luns) 4673 free(target->luns, M_CAMXPT); 4674 free(target, M_CAMXPT); 4675 } 4676 4677 static struct cam_ed * 4678 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4679 lun_id_t lun_id) 4680 { 4681 struct cam_ed *device; 4682 4683 device = xpt_alloc_device(bus, target, lun_id); 4684 if (device == NULL) 4685 return (NULL); 4686 4687 device->mintags = 1; 4688 device->maxtags = 1; 4689 return (device); 4690 } 4691 4692 static void 4693 xpt_destroy_device(void *context, int pending) 4694 { 4695 struct cam_ed *device = context; 4696 4697 mtx_lock(&device->device_mtx); 4698 mtx_destroy(&device->device_mtx); 4699 free(device, M_CAMDEV); 4700 } 4701 4702 struct cam_ed * 4703 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4704 { 4705 struct cam_ed *cur_device, *device; 4706 struct cam_devq *devq; 4707 cam_status status; 4708 4709 mtx_assert(&bus->eb_mtx, MA_OWNED); 4710 /* Make space for us in the device queue on our bus */ 4711 devq = bus->sim->devq; 4712 mtx_lock(&devq->send_mtx); 4713 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4714 mtx_unlock(&devq->send_mtx); 4715 if (status != CAM_REQ_CMP) 4716 return (NULL); 4717 4718 device = (struct cam_ed *)malloc(sizeof(*device), 4719 M_CAMDEV, M_NOWAIT|M_ZERO); 4720 if (device == NULL) 4721 return (NULL); 4722 4723 cam_init_pinfo(&device->devq_entry); 4724 device->target = target; 4725 device->lun_id = lun_id; 4726 device->sim = bus->sim; 4727 if (cam_ccbq_init(&device->ccbq, 4728 bus->sim->max_dev_openings) != 0) { 4729 free(device, M_CAMDEV); 4730 return (NULL); 4731 } 4732 SLIST_INIT(&device->asyncs); 4733 SLIST_INIT(&device->periphs); 4734 device->generation = 0; 4735 device->flags = CAM_DEV_UNCONFIGURED; 4736 device->tag_delay_count = 0; 4737 device->tag_saved_openings = 0; 4738 device->refcount = 1; 4739 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4740 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4741 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4742 /* 4743 * Hold a reference to our parent bus so it 4744 * will not go away before we do. 
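 * (The reference taken here is actually on our parent target; it is
 * dropped again in xpt_release_device() via xpt_release_target().)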
4745 */ 4746 target->refcount++; 4747 4748 cur_device = TAILQ_FIRST(&target->ed_entries); 4749 while (cur_device != NULL && cur_device->lun_id < lun_id) 4750 cur_device = TAILQ_NEXT(cur_device, links); 4751 if (cur_device != NULL) 4752 TAILQ_INSERT_BEFORE(cur_device, device, links); 4753 else 4754 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4755 target->generation++; 4756 return (device); 4757 } 4758 4759 void 4760 xpt_acquire_device(struct cam_ed *device) 4761 { 4762 struct cam_eb *bus = device->target->bus; 4763 4764 mtx_lock(&bus->eb_mtx); 4765 device->refcount++; 4766 mtx_unlock(&bus->eb_mtx); 4767 } 4768 4769 void 4770 xpt_release_device(struct cam_ed *device) 4771 { 4772 struct cam_eb *bus = device->target->bus; 4773 struct cam_devq *devq; 4774 4775 mtx_lock(&bus->eb_mtx); 4776 if (--device->refcount > 0) { 4777 mtx_unlock(&bus->eb_mtx); 4778 return; 4779 } 4780 4781 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4782 device->target->generation++; 4783 mtx_unlock(&bus->eb_mtx); 4784 4785 /* Release our slot in the devq */ 4786 devq = bus->sim->devq; 4787 mtx_lock(&devq->send_mtx); 4788 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4789 mtx_unlock(&devq->send_mtx); 4790 4791 KASSERT(SLIST_EMPTY(&device->periphs), 4792 ("destroying device, but periphs list is not empty")); 4793 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4794 ("destroying device while still queued for ccbs")); 4795 4796 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4797 callout_stop(&device->callout); 4798 4799 xpt_release_target(device->target); 4800 4801 cam_ccbq_fini(&device->ccbq); 4802 /* 4803 * Free allocated memory. free(9) does nothing if the 4804 * supplied pointer is NULL, so it is safe to call without 4805 * checking. 
4806 */ 4807 free(device->supported_vpds, M_CAMXPT); 4808 free(device->device_id, M_CAMXPT); 4809 free(device->ext_inq, M_CAMXPT); 4810 free(device->physpath, M_CAMXPT); 4811 free(device->rcap_buf, M_CAMXPT); 4812 free(device->serial_num, M_CAMXPT); 4813 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4814 } 4815 4816 u_int32_t 4817 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4818 { 4819 int result; 4820 struct cam_ed *dev; 4821 4822 dev = path->device; 4823 mtx_lock(&dev->sim->devq->send_mtx); 4824 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4825 mtx_unlock(&dev->sim->devq->send_mtx); 4826 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4827 || (dev->inq_flags & SID_CmdQue) != 0) 4828 dev->tag_saved_openings = newopenings; 4829 return (result); 4830 } 4831 4832 static struct cam_eb * 4833 xpt_find_bus(path_id_t path_id) 4834 { 4835 struct cam_eb *bus; 4836 4837 xpt_lock_buses(); 4838 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4839 bus != NULL; 4840 bus = TAILQ_NEXT(bus, links)) { 4841 if (bus->path_id == path_id) { 4842 bus->refcount++; 4843 break; 4844 } 4845 } 4846 xpt_unlock_buses(); 4847 return (bus); 4848 } 4849 4850 static struct cam_et * 4851 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4852 { 4853 struct cam_et *target; 4854 4855 mtx_assert(&bus->eb_mtx, MA_OWNED); 4856 for (target = TAILQ_FIRST(&bus->et_entries); 4857 target != NULL; 4858 target = TAILQ_NEXT(target, links)) { 4859 if (target->target_id == target_id) { 4860 target->refcount++; 4861 break; 4862 } 4863 } 4864 return (target); 4865 } 4866 4867 static struct cam_ed * 4868 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4869 { 4870 struct cam_ed *device; 4871 4872 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4873 for (device = TAILQ_FIRST(&target->ed_entries); 4874 device != NULL; 4875 device = TAILQ_NEXT(device, links)) { 4876 if (device->lun_id == lun_id) { 4877 device->refcount++; 4878 break; 4879 } 4880 } 4881 return (device); 4882 } 4883 4884 void 4885 xpt_start_tags(struct cam_path *path) 4886 { 4887 struct ccb_relsim crs; 4888 struct cam_ed *device; 4889 struct cam_sim *sim; 4890 int newopenings; 4891 4892 device = path->device; 4893 sim = path->bus->sim; 4894 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4895 xpt_freeze_devq(path, /*count*/1); 4896 device->inq_flags |= SID_CmdQue; 4897 if (device->tag_saved_openings != 0) 4898 newopenings = device->tag_saved_openings; 4899 else 4900 newopenings = min(device->maxtags, 4901 sim->max_tagged_dev_openings); 4902 xpt_dev_ccbq_resize(path, newopenings); 4903 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4904 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4905 crs.ccb_h.func_code = XPT_REL_SIMQ; 4906 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4907 crs.openings 4908 = crs.release_timeout 4909 = crs.qfrozen_cnt 4910 = 0; 4911 xpt_action((union ccb *)&crs); 4912 } 4913 4914 void 4915 xpt_stop_tags(struct cam_path *path) 4916 { 4917 struct ccb_relsim crs; 4918 struct cam_ed *device; 4919 struct cam_sim *sim; 4920 4921 device = path->device; 4922 sim = path->bus->sim; 4923 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4924 device->tag_delay_count = 0; 4925 xpt_freeze_devq(path, /*count*/1); 4926 device->inq_flags &= ~SID_CmdQue; 4927 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4928 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4929 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4930 crs.ccb_h.func_code = XPT_REL_SIMQ; 4931 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4932 crs.openings 4933 = 
crs.release_timeout 4934 = crs.qfrozen_cnt 4935 = 0; 4936 xpt_action((union ccb *)&crs); 4937 } 4938 4939 static void 4940 xpt_boot_delay(void *arg) 4941 { 4942 4943 xpt_release_boot(); 4944 } 4945 4946 static void 4947 xpt_config(void *arg) 4948 { 4949 /* 4950 * Now that interrupts are enabled, go find our devices 4951 */ 4952 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 4953 printf("xpt_config: failed to create taskqueue thread.\n"); 4954 4955 /* Setup debugging path */ 4956 if (cam_dflags != CAM_DEBUG_NONE) { 4957 if (xpt_create_path(&cam_dpath, NULL, 4958 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 4959 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 4960 printf("xpt_config: xpt_create_path() failed for debug" 4961 " target %d:%d:%d, debugging disabled\n", 4962 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 4963 cam_dflags = CAM_DEBUG_NONE; 4964 } 4965 } else 4966 cam_dpath = NULL; 4967 4968 periphdriver_init(1); 4969 xpt_hold_boot(); 4970 callout_init(&xsoftc.boot_callout, 1); 4971 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 4972 xpt_boot_delay, NULL, 0); 4973 /* Fire up rescan thread. */ 4974 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 4975 "cam", "scanner")) { 4976 printf("xpt_config: failed to create rescan thread.\n"); 4977 } 4978 } 4979 4980 void 4981 xpt_hold_boot(void) 4982 { 4983 xpt_lock_buses(); 4984 xsoftc.buses_to_config++; 4985 xpt_unlock_buses(); 4986 } 4987 4988 void 4989 xpt_release_boot(void) 4990 { 4991 xpt_lock_buses(); 4992 xsoftc.buses_to_config--; 4993 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 4994 struct xpt_task *task; 4995 4996 xsoftc.buses_config_done = 1; 4997 xpt_unlock_buses(); 4998 /* Call manually because we don't have any busses */ 4999 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5000 if (task != NULL) { 5001 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5002 taskqueue_enqueue(taskqueue_thread, &task->task); 5003 } 5004 } else 5005 xpt_unlock_buses(); 5006 } 5007 5008 /* 5009 * If the given device only has one peripheral attached to it, and if that 5010 * peripheral is the passthrough driver, announce it. This insures that the 5011 * user sees some sort of announcement for every peripheral in their system. 5012 */ 5013 static int 5014 xptpassannouncefunc(struct cam_ed *device, void *arg) 5015 { 5016 struct cam_periph *periph; 5017 int i; 5018 5019 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5020 periph = SLIST_NEXT(periph, periph_links), i++); 5021 5022 periph = SLIST_FIRST(&device->periphs); 5023 if ((i == 1) 5024 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5025 xpt_announce_periph(periph, NULL); 5026 5027 return(1); 5028 } 5029 5030 static void 5031 xpt_finishconfig_task(void *context, int pending) 5032 { 5033 5034 periphdriver_init(2); 5035 /* 5036 * Check for devices with no "standard" peripheral driver 5037 * attached. For any devices like that, announce the 5038 * passthrough driver so the user will see something. 5039 */ 5040 if (!bootverbose) 5041 xpt_for_all_devices(xptpassannouncefunc, NULL); 5042 5043 /* Release our hook so that the boot can continue. 
*/ 5044 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5045 free(xsoftc.xpt_config_hook, M_CAMXPT); 5046 xsoftc.xpt_config_hook = NULL; 5047 5048 free(context, M_CAMXPT); 5049 } 5050 5051 cam_status 5052 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5053 struct cam_path *path) 5054 { 5055 struct ccb_setasync csa; 5056 cam_status status; 5057 int xptpath = 0; 5058 5059 if (path == NULL) { 5060 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5061 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5062 if (status != CAM_REQ_CMP) 5063 return (status); 5064 xpt_path_lock(path); 5065 xptpath = 1; 5066 } 5067 5068 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5069 csa.ccb_h.func_code = XPT_SASYNC_CB; 5070 csa.event_enable = event; 5071 csa.callback = cbfunc; 5072 csa.callback_arg = cbarg; 5073 xpt_action((union ccb *)&csa); 5074 status = csa.ccb_h.status; 5075 5076 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5077 ("xpt_register_async: func %p\n", cbfunc)); 5078 5079 if (xptpath) { 5080 xpt_path_unlock(path); 5081 xpt_free_path(path); 5082 } 5083 5084 if ((status == CAM_REQ_CMP) && 5085 (csa.event_enable & AC_FOUND_DEVICE)) { 5086 /* 5087 * Get this peripheral up to date with all 5088 * the currently existing devices. 5089 */ 5090 xpt_for_all_devices(xptsetasyncfunc, &csa); 5091 } 5092 if ((status == CAM_REQ_CMP) && 5093 (csa.event_enable & AC_PATH_REGISTERED)) { 5094 /* 5095 * Get this peripheral up to date with all 5096 * the currently existing busses. 5097 */ 5098 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5099 } 5100 5101 return (status); 5102 } 5103 5104 static void 5105 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5106 { 5107 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5108 5109 switch (work_ccb->ccb_h.func_code) { 5110 /* Common cases first */ 5111 case XPT_PATH_INQ: /* Path routing inquiry */ 5112 { 5113 struct ccb_pathinq *cpi; 5114 5115 cpi = &work_ccb->cpi; 5116 cpi->version_num = 1; /* XXX??? */ 5117 cpi->hba_inquiry = 0; 5118 cpi->target_sprt = 0; 5119 cpi->hba_misc = 0; 5120 cpi->hba_eng_cnt = 0; 5121 cpi->max_target = 0; 5122 cpi->max_lun = 0; 5123 cpi->initiator_id = 0; 5124 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5125 strncpy(cpi->hba_vid, "", HBA_IDLEN); 5126 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5127 cpi->unit_number = sim->unit_number; 5128 cpi->bus_id = sim->bus_id; 5129 cpi->base_transfer_speed = 0; 5130 cpi->protocol = PROTO_UNSPECIFIED; 5131 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5132 cpi->transport = XPORT_UNSPECIFIED; 5133 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5134 cpi->ccb_h.status = CAM_REQ_CMP; 5135 xpt_done(work_ccb); 5136 break; 5137 } 5138 default: 5139 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5140 xpt_done(work_ccb); 5141 break; 5142 } 5143 } 5144 5145 /* 5146 * The xpt as a "controller" has no interrupt sources, so polling 5147 * is a no-op. 
5148 */ 5149 static void 5150 xptpoll(struct cam_sim *sim) 5151 { 5152 } 5153 5154 void 5155 xpt_lock_buses(void) 5156 { 5157 mtx_lock(&xsoftc.xpt_topo_lock); 5158 } 5159 5160 void 5161 xpt_unlock_buses(void) 5162 { 5163 mtx_unlock(&xsoftc.xpt_topo_lock); 5164 } 5165 5166 struct mtx * 5167 xpt_path_mtx(struct cam_path *path) 5168 { 5169 5170 return (&path->device->device_mtx); 5171 } 5172 5173 static void 5174 xpt_done_process(struct ccb_hdr *ccb_h) 5175 { 5176 struct cam_sim *sim; 5177 struct cam_devq *devq; 5178 struct mtx *mtx = NULL; 5179 5180 if (ccb_h->flags & CAM_HIGH_POWER) { 5181 struct highpowerlist *hphead; 5182 struct cam_ed *device; 5183 5184 mtx_lock(&xsoftc.xpt_highpower_lock); 5185 hphead = &xsoftc.highpowerq; 5186 5187 device = STAILQ_FIRST(hphead); 5188 5189 /* 5190 * Increment the count since this command is done. 5191 */ 5192 xsoftc.num_highpower++; 5193 5194 /* 5195 * Any high powered commands queued up? 5196 */ 5197 if (device != NULL) { 5198 5199 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5200 mtx_unlock(&xsoftc.xpt_highpower_lock); 5201 5202 mtx_lock(&device->sim->devq->send_mtx); 5203 xpt_release_devq_device(device, 5204 /*count*/1, /*runqueue*/TRUE); 5205 mtx_unlock(&device->sim->devq->send_mtx); 5206 } else 5207 mtx_unlock(&xsoftc.xpt_highpower_lock); 5208 } 5209 5210 sim = ccb_h->path->bus->sim; 5211 5212 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5213 xpt_release_simq(sim, /*run_queue*/FALSE); 5214 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5215 } 5216 5217 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5218 && (ccb_h->status & CAM_DEV_QFRZN)) { 5219 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5220 ccb_h->status &= ~CAM_DEV_QFRZN; 5221 } 5222 5223 devq = sim->devq; 5224 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5225 struct cam_ed *dev = ccb_h->path->device; 5226 5227 mtx_lock(&devq->send_mtx); 5228 devq->send_active--; 5229 devq->send_openings++; 5230 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5231 5232 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5233 && (dev->ccbq.dev_active == 0))) { 5234 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5235 xpt_release_devq_device(dev, /*count*/1, 5236 /*run_queue*/FALSE); 5237 } 5238 5239 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5240 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5241 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5242 xpt_release_devq_device(dev, /*count*/1, 5243 /*run_queue*/FALSE); 5244 } 5245 5246 if (!device_is_queued(dev)) 5247 (void)xpt_schedule_devq(devq, dev); 5248 xpt_run_devq(devq); 5249 mtx_unlock(&devq->send_mtx); 5250 5251 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5252 mtx = xpt_path_mtx(ccb_h->path); 5253 mtx_lock(mtx); 5254 5255 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5256 && (--dev->tag_delay_count == 0)) 5257 xpt_start_tags(ccb_h->path); 5258 } 5259 } 5260 5261 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5262 if (mtx == NULL) { 5263 mtx = xpt_path_mtx(ccb_h->path); 5264 mtx_lock(mtx); 5265 } 5266 } else { 5267 if (mtx != NULL) { 5268 mtx_unlock(mtx); 5269 mtx = NULL; 5270 } 5271 } 5272 5273 /* Call the peripheral driver's callback */ 5274 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5275 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5276 if (mtx != NULL) 5277 mtx_unlock(mtx); 5278 } 5279 5280 void 5281 xpt_done_td(void *arg) 5282 { 5283 struct cam_doneq *queue = arg; 5284 struct ccb_hdr *ccb_h; 5285 STAILQ_HEAD(, ccb_hdr) doneq; 5286 5287 STAILQ_INIT(&doneq); 5288 mtx_lock(&queue->cam_doneq_mtx); 5289 while (1) { 5290 while 
(STAILQ_EMPTY(&queue->cam_doneq)) { 5291 queue->cam_doneq_sleep = 1; 5292 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5293 PRIBIO, "-", 0); 5294 queue->cam_doneq_sleep = 0; 5295 } 5296 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5297 mtx_unlock(&queue->cam_doneq_mtx); 5298 5299 THREAD_NO_SLEEPING(); 5300 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5301 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5302 xpt_done_process(ccb_h); 5303 } 5304 THREAD_SLEEPING_OK(); 5305 5306 mtx_lock(&queue->cam_doneq_mtx); 5307 } 5308 } 5309 5310 static void 5311 camisr_runqueue(void) 5312 { 5313 struct ccb_hdr *ccb_h; 5314 struct cam_doneq *queue; 5315 int i; 5316 5317 /* Process global queues. */ 5318 for (i = 0; i < cam_num_doneqs; i++) { 5319 queue = &cam_doneqs[i]; 5320 mtx_lock(&queue->cam_doneq_mtx); 5321 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5322 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5323 mtx_unlock(&queue->cam_doneq_mtx); 5324 xpt_done_process(ccb_h); 5325 mtx_lock(&queue->cam_doneq_mtx); 5326 } 5327 mtx_unlock(&queue->cam_doneq_mtx); 5328 } 5329 } 5330 5331 struct kv 5332 { 5333 uint32_t v; 5334 const char *name; 5335 }; 5336 5337 static struct kv map[] = { 5338 { XPT_NOOP, "XPT_NOOP" }, 5339 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5340 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5341 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5342 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5343 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5344 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5345 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5346 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5347 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5348 { XPT_DEBUG, "XPT_DEBUG" }, 5349 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5350 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5351 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5352 { XPT_ASYNC, "XPT_ASYNC" }, 5353 { XPT_ABORT, "XPT_ABORT" }, 5354 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5355 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5356 { XPT_TERM_IO, "XPT_TERM_IO" }, 5357 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5358 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5359 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5360 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5361 { XPT_ATA_IO, "XPT_ATA_IO" }, 5362 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5363 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5364 { XPT_NVME_IO, "XPT_NVME_IO" }, 5365 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5366 { XPT_SMP_IO, "XPT_SMP_IO" }, 5367 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5368 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5369 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5370 { XPT_EN_LUN, "XPT_EN_LUN" }, 5371 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5372 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5373 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5374 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5375 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5376 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5377 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5378 { 0, 0 } 5379 }; 5380 5381 static const char * 5382 xpt_action_name(uint32_t action) 5383 { 5384 static char buffer[32]; /* Only for unknown messages -- racy */ 5385 struct kv *walker = map; 5386 5387 while (walker->name != NULL) { 5388 if (walker->v == action) 5389 return (walker->name); 5390 walker++; 5391 } 5392 5393 snprintf(buffer, sizeof(buffer), "%#x", action); 5394 return (buffer); 5395 } 5396
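
/*
 * Illustrative sketch only (not compiled): how a peripheral driver module
 * typically uses xpt_register_async() above to learn about new devices.
 * "foo" and fooasync() are hypothetical names used purely for the example.
 */
#if 0
static void	fooasync(void *callback_arg, u_int32_t code,
		    struct cam_path *path, void *arg);

static void
fooinit(void)
{
	cam_status status;

	/*
	 * Passing a NULL path registers against the XPT wildcard path, so
	 * fooasync() is invoked for AC_FOUND_DEVICE events on every bus,
	 * including devices that already exist at registration time.
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, fooasync, NULL, NULL);
	if (status != CAM_REQ_CMP)
		printf("foo: Failed to attach async callback, status %#x\n",
		    status);
}
#endif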