/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
	   &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
	   &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags =
    CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);


static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				 u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int
		xptdevicetraverse(struct cam_et *target,
				  struct cam_ed *start_device,
				  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static const char *	xpt_action_name(uint32_t action);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.
			 * The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
	struct xpt_proto **pp;

	SET_FOREACH(pp, cam_xpt_proto_set) {
		if ((*pp)->proto == proto)
			return *pp;
	}

	return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since lock can be dropped inside and path freed
			 * by completion callback even before return here,
			 * take our own path copy for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
		xpt_action_name(ccb->ccb_h.func_code)));

	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths.
	 */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}
}


void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;

	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->announce(path->device);
	else
		printf("%s%d: Unknown protocol device %d\n",
		    periph->periph_name, periph->unit_number,
		    path->device->protocol);
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	path->bus->xport->ops->announce(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce caller's details if they've passed in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->denounce(path->device);
	else
		printf("%s%d: Unknown protocol device %d\n",
		    periph->periph_name, periph->unit_number,
		    path->device->protocol);
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}


int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
	    strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.
		 * We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_et *target;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL)) {
		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
		    bus->generation)) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return (0);
		}
		target = (struct cam_et *)cdm->pos.cookie.target;
		target->refcount++;
	} else
		target = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_eb *bus;
	struct cam_ed *device;

	cdm = (struct ccb_dev_match *)arg;
	bus = target->bus;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL)) {
		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
		    target->generation) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		device = (struct cam_ed *)cdm->pos.cookie.device;
		device->refcount++;
	} else
		device = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct cam_eb *bus;
	struct cam_periph *periph;
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;
	bus = device->target->bus;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	xpt_lock_buses();
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL)) {
		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
		    device->generation) {
			mtx_unlock(&bus->eb_mtx);
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		periph = (struct cam_periph *)cdm->pos.cookie.periph;
		periph->refcount++;
	} else
		periph = NULL;
	mtx_unlock(&bus->eb_mtx);
	xpt_unlock_buses();

	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}

static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	struct cam_eb *bus;
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	xpt_lock_buses();
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL)) {
		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
		    xsoftc.bus_generation) {
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		bus = (struct cam_eb *)cdm->pos.cookie.bus;
		bus->refcount++;
	} else
		bus = NULL;
	xpt_unlock_buses();

	ret = xptbustraverse(bus, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
1899 */ 1900 if (ret == 1) 1901 cdm->status = CAM_DEV_MATCH_LAST; 1902 1903 return(ret); 1904 } 1905 1906 static int 1907 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1908 { 1909 struct cam_periph *periph; 1910 struct ccb_dev_match *cdm; 1911 1912 cdm = (struct ccb_dev_match *)arg; 1913 1914 xpt_lock_buses(); 1915 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1916 && (cdm->pos.cookie.pdrv == pdrv) 1917 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1918 && (cdm->pos.cookie.periph != NULL)) { 1919 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1920 (*pdrv)->generation) { 1921 xpt_unlock_buses(); 1922 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1923 return(0); 1924 } 1925 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1926 periph->refcount++; 1927 } else 1928 periph = NULL; 1929 xpt_unlock_buses(); 1930 1931 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1932 } 1933 1934 static int 1935 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1936 { 1937 struct ccb_dev_match *cdm; 1938 dev_match_ret retval; 1939 1940 cdm = (struct ccb_dev_match *)arg; 1941 1942 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1943 1944 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1945 cdm->status = CAM_DEV_MATCH_ERROR; 1946 return(0); 1947 } 1948 1949 /* 1950 * If the copy flag is set, copy this peripheral out. 1951 */ 1952 if (retval & DM_RET_COPY) { 1953 int spaceleft, j; 1954 1955 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1956 sizeof(struct dev_match_result)); 1957 1958 /* 1959 * If we don't have enough space to put in another 1960 * match result, save our position and tell the 1961 * user there are more devices to check. 1962 */ 1963 if (spaceleft < sizeof(struct dev_match_result)) { 1964 struct periph_driver **pdrv; 1965 1966 pdrv = NULL; 1967 bzero(&cdm->pos, sizeof(cdm->pos)); 1968 cdm->pos.position_type = 1969 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 1970 CAM_DEV_POS_PERIPH; 1971 1972 /* 1973 * This may look a bit non-sensical, but it is 1974 * actually quite logical. There are very few 1975 * peripheral drivers, and bloating every peripheral 1976 * structure with a pointer back to its parent 1977 * peripheral driver linker set entry would cost 1978 * more in the long run than doing this quick lookup. 1979 */ 1980 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 1981 if (strcmp((*pdrv)->driver_name, 1982 periph->periph_name) == 0) 1983 break; 1984 } 1985 1986 if (*pdrv == NULL) { 1987 cdm->status = CAM_DEV_MATCH_ERROR; 1988 return(0); 1989 } 1990 1991 cdm->pos.cookie.pdrv = pdrv; 1992 /* 1993 * The periph generation slot does double duty, as 1994 * does the periph pointer slot. They are used for 1995 * both edt and pdrv lookups and positioning. 1996 */ 1997 cdm->pos.cookie.periph = periph; 1998 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1999 (*pdrv)->generation; 2000 cdm->status = CAM_DEV_MATCH_MORE; 2001 return(0); 2002 } 2003 2004 j = cdm->num_matches; 2005 cdm->num_matches++; 2006 cdm->matches[j].type = DEV_MATCH_PERIPH; 2007 cdm->matches[j].result.periph_result.path_id = 2008 periph->path->bus->path_id; 2009 2010 /* 2011 * The transport layer peripheral doesn't have a target or 2012 * lun. 
2013 */ 2014 if (periph->path->target) 2015 cdm->matches[j].result.periph_result.target_id = 2016 periph->path->target->target_id; 2017 else 2018 cdm->matches[j].result.periph_result.target_id = 2019 CAM_TARGET_WILDCARD; 2020 2021 if (periph->path->device) 2022 cdm->matches[j].result.periph_result.target_lun = 2023 periph->path->device->lun_id; 2024 else 2025 cdm->matches[j].result.periph_result.target_lun = 2026 CAM_LUN_WILDCARD; 2027 2028 cdm->matches[j].result.periph_result.unit_number = 2029 periph->unit_number; 2030 strncpy(cdm->matches[j].result.periph_result.periph_name, 2031 periph->periph_name, DEV_IDLEN); 2032 } 2033 2034 return(1); 2035 } 2036 2037 static int 2038 xptperiphlistmatch(struct ccb_dev_match *cdm) 2039 { 2040 int ret; 2041 2042 cdm->num_matches = 0; 2043 2044 /* 2045 * At this point in the edt traversal function, we check the bus 2046 * list generation to make sure that no busses have been added or 2047 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2048 * For the peripheral driver list traversal function, however, we 2049 * don't have to worry about new peripheral driver types coming or 2050 * going; they're in a linker set, and therefore can't change 2051 * without a recompile. 2052 */ 2053 2054 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2055 && (cdm->pos.cookie.pdrv != NULL)) 2056 ret = xptpdrvtraverse( 2057 (struct periph_driver **)cdm->pos.cookie.pdrv, 2058 xptplistpdrvfunc, cdm); 2059 else 2060 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2061 2062 /* 2063 * If we get back 0, that means that we had to stop before fully 2064 * traversing the peripheral driver tree. It also means that one of 2065 * the subroutines has set the status field to the proper value. If 2066 * we get back 1, we've fully traversed the EDT and copied out any 2067 * matching entries. 
2068 */ 2069 if (ret == 1) 2070 cdm->status = CAM_DEV_MATCH_LAST; 2071 2072 return(ret); 2073 } 2074 2075 static int 2076 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2077 { 2078 struct cam_eb *bus, *next_bus; 2079 int retval; 2080 2081 retval = 1; 2082 if (start_bus) 2083 bus = start_bus; 2084 else { 2085 xpt_lock_buses(); 2086 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2087 if (bus == NULL) { 2088 xpt_unlock_buses(); 2089 return (retval); 2090 } 2091 bus->refcount++; 2092 xpt_unlock_buses(); 2093 } 2094 for (; bus != NULL; bus = next_bus) { 2095 retval = tr_func(bus, arg); 2096 if (retval == 0) { 2097 xpt_release_bus(bus); 2098 break; 2099 } 2100 xpt_lock_buses(); 2101 next_bus = TAILQ_NEXT(bus, links); 2102 if (next_bus) 2103 next_bus->refcount++; 2104 xpt_unlock_buses(); 2105 xpt_release_bus(bus); 2106 } 2107 return(retval); 2108 } 2109 2110 static int 2111 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2112 xpt_targetfunc_t *tr_func, void *arg) 2113 { 2114 struct cam_et *target, *next_target; 2115 int retval; 2116 2117 retval = 1; 2118 if (start_target) 2119 target = start_target; 2120 else { 2121 mtx_lock(&bus->eb_mtx); 2122 target = TAILQ_FIRST(&bus->et_entries); 2123 if (target == NULL) { 2124 mtx_unlock(&bus->eb_mtx); 2125 return (retval); 2126 } 2127 target->refcount++; 2128 mtx_unlock(&bus->eb_mtx); 2129 } 2130 for (; target != NULL; target = next_target) { 2131 retval = tr_func(target, arg); 2132 if (retval == 0) { 2133 xpt_release_target(target); 2134 break; 2135 } 2136 mtx_lock(&bus->eb_mtx); 2137 next_target = TAILQ_NEXT(target, links); 2138 if (next_target) 2139 next_target->refcount++; 2140 mtx_unlock(&bus->eb_mtx); 2141 xpt_release_target(target); 2142 } 2143 return(retval); 2144 } 2145 2146 static int 2147 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2148 xpt_devicefunc_t *tr_func, void *arg) 2149 { 2150 struct cam_eb *bus; 2151 struct cam_ed *device, *next_device; 2152 int retval; 2153 2154 retval = 1; 2155 bus = target->bus; 2156 if (start_device) 2157 device = start_device; 2158 else { 2159 mtx_lock(&bus->eb_mtx); 2160 device = TAILQ_FIRST(&target->ed_entries); 2161 if (device == NULL) { 2162 mtx_unlock(&bus->eb_mtx); 2163 return (retval); 2164 } 2165 device->refcount++; 2166 mtx_unlock(&bus->eb_mtx); 2167 } 2168 for (; device != NULL; device = next_device) { 2169 mtx_lock(&device->device_mtx); 2170 retval = tr_func(device, arg); 2171 mtx_unlock(&device->device_mtx); 2172 if (retval == 0) { 2173 xpt_release_device(device); 2174 break; 2175 } 2176 mtx_lock(&bus->eb_mtx); 2177 next_device = TAILQ_NEXT(device, links); 2178 if (next_device) 2179 next_device->refcount++; 2180 mtx_unlock(&bus->eb_mtx); 2181 xpt_release_device(device); 2182 } 2183 return(retval); 2184 } 2185 2186 static int 2187 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2188 xpt_periphfunc_t *tr_func, void *arg) 2189 { 2190 struct cam_eb *bus; 2191 struct cam_periph *periph, *next_periph; 2192 int retval; 2193 2194 retval = 1; 2195 2196 bus = device->target->bus; 2197 if (start_periph) 2198 periph = start_periph; 2199 else { 2200 xpt_lock_buses(); 2201 mtx_lock(&bus->eb_mtx); 2202 periph = SLIST_FIRST(&device->periphs); 2203 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2204 periph = SLIST_NEXT(periph, periph_links); 2205 if (periph == NULL) { 2206 mtx_unlock(&bus->eb_mtx); 2207 xpt_unlock_buses(); 2208 return (retval); 2209 } 2210 periph->refcount++; 2211 mtx_unlock(&bus->eb_mtx); 2212 
xpt_unlock_buses(); 2213 } 2214 for (; periph != NULL; periph = next_periph) { 2215 retval = tr_func(periph, arg); 2216 if (retval == 0) { 2217 cam_periph_release_locked(periph); 2218 break; 2219 } 2220 xpt_lock_buses(); 2221 mtx_lock(&bus->eb_mtx); 2222 next_periph = SLIST_NEXT(periph, periph_links); 2223 while (next_periph != NULL && 2224 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2225 next_periph = SLIST_NEXT(next_periph, periph_links); 2226 if (next_periph) 2227 next_periph->refcount++; 2228 mtx_unlock(&bus->eb_mtx); 2229 xpt_unlock_buses(); 2230 cam_periph_release_locked(periph); 2231 } 2232 return(retval); 2233 } 2234 2235 static int 2236 xptpdrvtraverse(struct periph_driver **start_pdrv, 2237 xpt_pdrvfunc_t *tr_func, void *arg) 2238 { 2239 struct periph_driver **pdrv; 2240 int retval; 2241 2242 retval = 1; 2243 2244 /* 2245 * We don't traverse the peripheral driver list like we do the 2246 * other lists, because it is a linker set, and therefore cannot be 2247 * changed during runtime. If the peripheral driver list is ever 2248 * re-done to be something other than a linker set (i.e. it can 2249 * change while the system is running), the list traversal should 2250 * be modified to work like the other traversal functions. 2251 */ 2252 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2253 *pdrv != NULL; pdrv++) { 2254 retval = tr_func(pdrv, arg); 2255 2256 if (retval == 0) 2257 return(retval); 2258 } 2259 2260 return(retval); 2261 } 2262 2263 static int 2264 xptpdperiphtraverse(struct periph_driver **pdrv, 2265 struct cam_periph *start_periph, 2266 xpt_periphfunc_t *tr_func, void *arg) 2267 { 2268 struct cam_periph *periph, *next_periph; 2269 int retval; 2270 2271 retval = 1; 2272 2273 if (start_periph) 2274 periph = start_periph; 2275 else { 2276 xpt_lock_buses(); 2277 periph = TAILQ_FIRST(&(*pdrv)->units); 2278 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2279 periph = TAILQ_NEXT(periph, unit_links); 2280 if (periph == NULL) { 2281 xpt_unlock_buses(); 2282 return (retval); 2283 } 2284 periph->refcount++; 2285 xpt_unlock_buses(); 2286 } 2287 for (; periph != NULL; periph = next_periph) { 2288 cam_periph_lock(periph); 2289 retval = tr_func(periph, arg); 2290 cam_periph_unlock(periph); 2291 if (retval == 0) { 2292 cam_periph_release(periph); 2293 break; 2294 } 2295 xpt_lock_buses(); 2296 next_periph = TAILQ_NEXT(periph, unit_links); 2297 while (next_periph != NULL && 2298 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2299 next_periph = TAILQ_NEXT(next_periph, unit_links); 2300 if (next_periph) 2301 next_periph->refcount++; 2302 xpt_unlock_buses(); 2303 cam_periph_release(periph); 2304 } 2305 return(retval); 2306 } 2307 2308 static int 2309 xptdefbusfunc(struct cam_eb *bus, void *arg) 2310 { 2311 struct xpt_traverse_config *tr_config; 2312 2313 tr_config = (struct xpt_traverse_config *)arg; 2314 2315 if (tr_config->depth == XPT_DEPTH_BUS) { 2316 xpt_busfunc_t *tr_func; 2317 2318 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2319 2320 return(tr_func(bus, tr_config->tr_arg)); 2321 } else 2322 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2323 } 2324 2325 static int 2326 xptdeftargetfunc(struct cam_et *target, void *arg) 2327 { 2328 struct xpt_traverse_config *tr_config; 2329 2330 tr_config = (struct xpt_traverse_config *)arg; 2331 2332 if (tr_config->depth == XPT_DEPTH_TARGET) { 2333 xpt_targetfunc_t *tr_func; 2334 2335 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2336 2337 return(tr_func(target, tr_config->tr_arg)); 2338 } else 2339 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2340 } 2341 2342 static int 2343 xptdefdevicefunc(struct cam_ed *device, void *arg) 2344 { 2345 struct xpt_traverse_config *tr_config; 2346 2347 tr_config = (struct xpt_traverse_config *)arg; 2348 2349 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2350 xpt_devicefunc_t *tr_func; 2351 2352 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2353 2354 return(tr_func(device, tr_config->tr_arg)); 2355 } else 2356 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2357 } 2358 2359 static int 2360 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2361 { 2362 struct xpt_traverse_config *tr_config; 2363 xpt_periphfunc_t *tr_func; 2364 2365 tr_config = (struct xpt_traverse_config *)arg; 2366 2367 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2368 2369 /* 2370 * Unlike the other default functions, we don't check for depth 2371 * here. The peripheral driver level is the last level in the EDT, 2372 * so if we're here, we should execute the function in question. 2373 */ 2374 return(tr_func(periph, tr_config->tr_arg)); 2375 } 2376 2377 /* 2378 * Execute the given function for every bus in the EDT. 2379 */ 2380 static int 2381 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2382 { 2383 struct xpt_traverse_config tr_config; 2384 2385 tr_config.depth = XPT_DEPTH_BUS; 2386 tr_config.tr_func = tr_func; 2387 tr_config.tr_arg = arg; 2388 2389 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2390 } 2391 2392 /* 2393 * Execute the given function for every device in the EDT. 2394 */ 2395 static int 2396 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2397 { 2398 struct xpt_traverse_config tr_config; 2399 2400 tr_config.depth = XPT_DEPTH_DEVICE; 2401 tr_config.tr_func = tr_func; 2402 tr_config.tr_arg = arg; 2403 2404 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2405 } 2406 2407 static int 2408 xptsetasyncfunc(struct cam_ed *device, void *arg) 2409 { 2410 struct cam_path path; 2411 struct ccb_getdev cgd; 2412 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2413 2414 /* 2415 * Don't report unconfigured devices (Wildcard devs, 2416 * devices only for target mode, device instances 2417 * that have been invalidated but are waiting for 2418 * their last reference count to be released). 
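 *
 * For illustration: the callback invoked below has the standard async
 * callback shape, so a peripheral driver that registered for
 * AC_FOUND_DEVICE (hypothetical name "xxasync") sees every already
 * configured device replayed to it as if it had just been found:
 *
 *	static void
 *	xxasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		if (code == AC_FOUND_DEVICE) {
 *			struct ccb_getdev *cgd = arg;
 *
 *			(inspect cgd->protocol, cgd->inq_data, ... and
 *			 decide whether this device is interesting)
 *		}
 *	}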
2419 */ 2420 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2421 return (1); 2422 2423 xpt_compile_path(&path, 2424 NULL, 2425 device->target->bus->path_id, 2426 device->target->target_id, 2427 device->lun_id); 2428 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2429 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2430 xpt_action((union ccb *)&cgd); 2431 csa->callback(csa->callback_arg, 2432 AC_FOUND_DEVICE, 2433 &path, &cgd); 2434 xpt_release_path(&path); 2435 2436 return(1); 2437 } 2438 2439 static int 2440 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2441 { 2442 struct cam_path path; 2443 struct ccb_pathinq cpi; 2444 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2445 2446 xpt_compile_path(&path, /*periph*/NULL, 2447 bus->path_id, 2448 CAM_TARGET_WILDCARD, 2449 CAM_LUN_WILDCARD); 2450 xpt_path_lock(&path); 2451 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2452 cpi.ccb_h.func_code = XPT_PATH_INQ; 2453 xpt_action((union ccb *)&cpi); 2454 csa->callback(csa->callback_arg, 2455 AC_PATH_REGISTERED, 2456 &path, &cpi); 2457 xpt_path_unlock(&path); 2458 xpt_release_path(&path); 2459 2460 return(1); 2461 } 2462 2463 void 2464 xpt_action(union ccb *start_ccb) 2465 { 2466 2467 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2468 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2469 xpt_action_name(start_ccb->ccb_h.func_code))); 2470 2471 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2472 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2473 } 2474 2475 void 2476 xpt_action_default(union ccb *start_ccb) 2477 { 2478 struct cam_path *path; 2479 struct cam_sim *sim; 2480 int lock; 2481 2482 path = start_ccb->ccb_h.path; 2483 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2484 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2485 xpt_action_name(start_ccb->ccb_h.func_code))); 2486 2487 switch (start_ccb->ccb_h.func_code) { 2488 case XPT_SCSI_IO: 2489 { 2490 struct cam_ed *device; 2491 2492 /* 2493 * For the sake of compatibility with SCSI-1 2494 * devices that may not understand the identify 2495 * message, we include lun information in the 2496 * second byte of all commands. SCSI-1 specifies 2497 * that luns are a 3 bit value and reserves only 3 2498 * bits for lun information in the CDB. Later 2499 * revisions of the SCSI spec allow for more than 8 2500 * luns, but have deprecated lun information in the 2501 * CDB. So, if the lun won't fit, we must omit. 2502 * 2503 * Also be aware that during initial probing for devices, 2504 * the inquiry information is unknown but initialized to 0. 2505 * This means that this code will be exercised while probing 2506 * devices with an ANSI revision greater than 2. 
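 *
 * As a concrete example of the transform below: for target_lun 3 on a
 * SCSI-2 device the statement reduces to
 *
 *	start_ccb->csio.cdb_io.cdb_bytes[1] |= 3 << 5;
 *
 * i.e. bits 7-5 of CDB byte 1 become 011, while the low five bits of
 * that byte (used, for instance, by READ(6) for the high bits of the
 * LBA) are left untouched.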
 */
		device = path->device;
		if (device->protocol_version <= SCSI_REV_2
		 && start_ccb->ccb_h.target_lun < 8
		 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {

			start_ccb->csio.cdb_io.cdb_bytes[1] |=
			    start_ccb->ccb_h.target_lun << 5;
		}
		start_ccb->csio.scsi_status = SCSI_STATUS_OK;
	}
	/* FALLTHROUGH */
	case XPT_TARGET_IO:
	case XPT_CONT_TARGET_IO:
		start_ccb->csio.sense_resid = 0;
		start_ccb->csio.resid = 0;
		/* FALLTHROUGH */
	case XPT_ATA_IO:
		if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
			start_ccb->ataio.resid = 0;
		/* FALLTHROUGH */
	case XPT_NVME_IO:
		if (start_ccb->ccb_h.func_code == XPT_NVME_IO)
			start_ccb->nvmeio.resid = 0;
		/* FALLTHROUGH */
	case XPT_RESET_DEV:
	case XPT_ENG_EXEC:
	case XPT_SMP_IO:
	{
		struct cam_devq *devq;

		devq = path->bus->sim->devq;
		mtx_lock(&devq->send_mtx);
		cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
		if (xpt_schedule_devq(devq, path->device) != 0)
			xpt_run_devq(devq);
		mtx_unlock(&devq->send_mtx);
		break;
	}
	case XPT_CALC_GEOMETRY:
		/* Filter out garbage */
		if (start_ccb->ccg.block_size == 0
		 || start_ccb->ccg.volume_size == 0) {
			start_ccb->ccg.cylinders = 0;
			start_ccb->ccg.heads = 0;
			start_ccb->ccg.secs_per_track = 0;
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
#if defined(PC98) || defined(__sparc64__)
		/*
		 * In a PC-98 system, geometry translation depends on
		 * the "real" device geometry obtained from mode page 4.
		 * SCSI geometry translation is performed in the
		 * initialization routine of the SCSI BIOS and the result
		 * stored in host memory.  If the translation is available
		 * in host memory, use it.  If not, rely on the default
		 * translation the device driver performs.
		 * For sparc64, we may need to adjust the geometry of large
		 * disks in order to fit the limitations of the 16-bit
		 * fields of the VTOC8 disk label.
		 */
		if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
			start_ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
#endif
		goto call_sim;
	case XPT_ABORT:
	{
		union ccb* abort_ccb;

		abort_ccb = start_ccb->cab.abort_ccb;
		if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {

			if (abort_ccb->ccb_h.pinfo.index >= 0) {
				struct cam_ccbq *ccbq;
				struct cam_ed *device;

				device = abort_ccb->ccb_h.path->device;
				ccbq = &device->ccbq;
				cam_ccbq_remove_ccb(ccbq, abort_ccb);
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				xpt_done(abort_ccb);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
			if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
			 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
				/*
				 * We've caught this ccb en route to
				 * the SIM.  Flag it for abort and the
				 * SIM will do so just before starting
				 * real work on the CCB.
				 */
				abort_ccb->ccb_h.status =
				    CAM_REQ_ABORTED|CAM_DEV_QFRZN;
				xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
				start_ccb->ccb_h.status = CAM_REQ_CMP;
				break;
			}
		}
		if (XPT_FC_IS_QUEUED(abort_ccb)
		 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
			/*
			 * It's already completed but waiting
			 * for our SWI to get to it.
2616 */ 2617 start_ccb->ccb_h.status = CAM_UA_ABORT; 2618 break; 2619 } 2620 /* 2621 * If we weren't able to take care of the abort request 2622 * in the XPT, pass the request down to the SIM for processing. 2623 */ 2624 } 2625 /* FALLTHROUGH */ 2626 case XPT_ACCEPT_TARGET_IO: 2627 case XPT_EN_LUN: 2628 case XPT_IMMED_NOTIFY: 2629 case XPT_NOTIFY_ACK: 2630 case XPT_RESET_BUS: 2631 case XPT_IMMEDIATE_NOTIFY: 2632 case XPT_NOTIFY_ACKNOWLEDGE: 2633 case XPT_GET_SIM_KNOB_OLD: 2634 case XPT_GET_SIM_KNOB: 2635 case XPT_SET_SIM_KNOB: 2636 case XPT_GET_TRAN_SETTINGS: 2637 case XPT_SET_TRAN_SETTINGS: 2638 case XPT_PATH_INQ: 2639 call_sim: 2640 sim = path->bus->sim; 2641 lock = (mtx_owned(sim->mtx) == 0); 2642 if (lock) 2643 CAM_SIM_LOCK(sim); 2644 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2645 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2646 (*(sim->sim_action))(sim, start_ccb); 2647 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2648 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2649 if (lock) 2650 CAM_SIM_UNLOCK(sim); 2651 break; 2652 case XPT_PATH_STATS: 2653 start_ccb->cpis.last_reset = path->bus->last_reset; 2654 start_ccb->ccb_h.status = CAM_REQ_CMP; 2655 break; 2656 case XPT_GDEV_TYPE: 2657 { 2658 struct cam_ed *dev; 2659 2660 dev = path->device; 2661 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2662 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2663 } else { 2664 struct ccb_getdev *cgd; 2665 2666 cgd = &start_ccb->cgd; 2667 cgd->protocol = dev->protocol; 2668 cgd->inq_data = dev->inq_data; 2669 cgd->ident_data = dev->ident_data; 2670 cgd->inq_flags = dev->inq_flags; 2671 cgd->nvme_data = dev->nvme_data; 2672 cgd->nvme_cdata = dev->nvme_cdata; 2673 cgd->ccb_h.status = CAM_REQ_CMP; 2674 cgd->serial_num_len = dev->serial_num_len; 2675 if ((dev->serial_num_len > 0) 2676 && (dev->serial_num != NULL)) 2677 bcopy(dev->serial_num, cgd->serial_num, 2678 dev->serial_num_len); 2679 } 2680 break; 2681 } 2682 case XPT_GDEV_STATS: 2683 { 2684 struct cam_ed *dev; 2685 2686 dev = path->device; 2687 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2688 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2689 } else { 2690 struct ccb_getdevstats *cgds; 2691 struct cam_eb *bus; 2692 struct cam_et *tar; 2693 struct cam_devq *devq; 2694 2695 cgds = &start_ccb->cgds; 2696 bus = path->bus; 2697 tar = path->target; 2698 devq = bus->sim->devq; 2699 mtx_lock(&devq->send_mtx); 2700 cgds->dev_openings = dev->ccbq.dev_openings; 2701 cgds->dev_active = dev->ccbq.dev_active; 2702 cgds->allocated = dev->ccbq.allocated; 2703 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2704 cgds->held = cgds->allocated - cgds->dev_active - 2705 cgds->queued; 2706 cgds->last_reset = tar->last_reset; 2707 cgds->maxtags = dev->maxtags; 2708 cgds->mintags = dev->mintags; 2709 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2710 cgds->last_reset = bus->last_reset; 2711 mtx_unlock(&devq->send_mtx); 2712 cgds->ccb_h.status = CAM_REQ_CMP; 2713 } 2714 break; 2715 } 2716 case XPT_GDEVLIST: 2717 { 2718 struct cam_periph *nperiph; 2719 struct periph_list *periph_head; 2720 struct ccb_getdevlist *cgdl; 2721 u_int i; 2722 struct cam_ed *device; 2723 int found; 2724 2725 2726 found = 0; 2727 2728 /* 2729 * Don't want anyone mucking with our data. 2730 */ 2731 device = path->device; 2732 periph_head = &device->periphs; 2733 cgdl = &start_ccb->cgdl; 2734 2735 /* 2736 * Check and see if the list has changed since the user 2737 * last requested a list member. 
If so, tell them that the 2738 * list has changed, and therefore they need to start over 2739 * from the beginning. 2740 */ 2741 if ((cgdl->index != 0) && 2742 (cgdl->generation != device->generation)) { 2743 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2744 break; 2745 } 2746 2747 /* 2748 * Traverse the list of peripherals and attempt to find 2749 * the requested peripheral. 2750 */ 2751 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2752 (nperiph != NULL) && (i <= cgdl->index); 2753 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2754 if (i == cgdl->index) { 2755 strncpy(cgdl->periph_name, 2756 nperiph->periph_name, 2757 DEV_IDLEN); 2758 cgdl->unit_number = nperiph->unit_number; 2759 found = 1; 2760 } 2761 } 2762 if (found == 0) { 2763 cgdl->status = CAM_GDEVLIST_ERROR; 2764 break; 2765 } 2766 2767 if (nperiph == NULL) 2768 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2769 else 2770 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2771 2772 cgdl->index++; 2773 cgdl->generation = device->generation; 2774 2775 cgdl->ccb_h.status = CAM_REQ_CMP; 2776 break; 2777 } 2778 case XPT_DEV_MATCH: 2779 { 2780 dev_pos_type position_type; 2781 struct ccb_dev_match *cdm; 2782 2783 cdm = &start_ccb->cdm; 2784 2785 /* 2786 * There are two ways of getting at information in the EDT. 2787 * The first way is via the primary EDT tree. It starts 2788 * with a list of busses, then a list of targets on a bus, 2789 * then devices/luns on a target, and then peripherals on a 2790 * device/lun. The "other" way is by the peripheral driver 2791 * lists. The peripheral driver lists are organized by 2792 * peripheral driver. (obviously) So it makes sense to 2793 * use the peripheral driver list if the user is looking 2794 * for something like "da1", or all "da" devices. If the 2795 * user is looking for something on a particular bus/target 2796 * or lun, it's generally better to go through the EDT tree. 2797 */ 2798 2799 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2800 position_type = cdm->pos.position_type; 2801 else { 2802 u_int i; 2803 2804 position_type = CAM_DEV_POS_NONE; 2805 2806 for (i = 0; i < cdm->num_patterns; i++) { 2807 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2808 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2809 position_type = CAM_DEV_POS_EDT; 2810 break; 2811 } 2812 } 2813 2814 if (cdm->num_patterns == 0) 2815 position_type = CAM_DEV_POS_EDT; 2816 else if (position_type == CAM_DEV_POS_NONE) 2817 position_type = CAM_DEV_POS_PDRV; 2818 } 2819 2820 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2821 case CAM_DEV_POS_EDT: 2822 xptedtmatch(cdm); 2823 break; 2824 case CAM_DEV_POS_PDRV: 2825 xptperiphlistmatch(cdm); 2826 break; 2827 default: 2828 cdm->status = CAM_DEV_MATCH_ERROR; 2829 break; 2830 } 2831 2832 if (cdm->status == CAM_DEV_MATCH_ERROR) 2833 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2834 else 2835 start_ccb->ccb_h.status = CAM_REQ_CMP; 2836 2837 break; 2838 } 2839 case XPT_SASYNC_CB: 2840 { 2841 struct ccb_setasync *csa; 2842 struct async_node *cur_entry; 2843 struct async_list *async_head; 2844 u_int32_t added; 2845 2846 csa = &start_ccb->csa; 2847 added = csa->event_enable; 2848 async_head = &path->device->asyncs; 2849 2850 /* 2851 * If there is already an entry for us, simply 2852 * update it. 
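 *
 * For reference, the CCB handled here is normally built by a peripheral
 * driver along these lines (a sketch; the event mask and the hypothetical
 * "xxasync" callback are placeholders):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = xxasync;
 *	csa.callback_arg = periph;
 *	xpt_action((union ccb *)&csa);
 *
 * Passing event_enable == 0 for an existing entry removes it, as the
 * code below shows.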
2853 */ 2854 cur_entry = SLIST_FIRST(async_head); 2855 while (cur_entry != NULL) { 2856 if ((cur_entry->callback_arg == csa->callback_arg) 2857 && (cur_entry->callback == csa->callback)) 2858 break; 2859 cur_entry = SLIST_NEXT(cur_entry, links); 2860 } 2861 2862 if (cur_entry != NULL) { 2863 /* 2864 * If the request has no flags set, 2865 * remove the entry. 2866 */ 2867 added &= ~cur_entry->event_enable; 2868 if (csa->event_enable == 0) { 2869 SLIST_REMOVE(async_head, cur_entry, 2870 async_node, links); 2871 xpt_release_device(path->device); 2872 free(cur_entry, M_CAMXPT); 2873 } else { 2874 cur_entry->event_enable = csa->event_enable; 2875 } 2876 csa->event_enable = added; 2877 } else { 2878 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2879 M_NOWAIT); 2880 if (cur_entry == NULL) { 2881 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2882 break; 2883 } 2884 cur_entry->event_enable = csa->event_enable; 2885 cur_entry->event_lock = 2886 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2887 cur_entry->callback_arg = csa->callback_arg; 2888 cur_entry->callback = csa->callback; 2889 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2890 xpt_acquire_device(path->device); 2891 } 2892 start_ccb->ccb_h.status = CAM_REQ_CMP; 2893 break; 2894 } 2895 case XPT_REL_SIMQ: 2896 { 2897 struct ccb_relsim *crs; 2898 struct cam_ed *dev; 2899 2900 crs = &start_ccb->crs; 2901 dev = path->device; 2902 if (dev == NULL) { 2903 2904 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2905 break; 2906 } 2907 2908 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2909 2910 /* Don't ever go below one opening */ 2911 if (crs->openings > 0) { 2912 xpt_dev_ccbq_resize(path, crs->openings); 2913 if (bootverbose) { 2914 xpt_print(path, 2915 "number of openings is now %d\n", 2916 crs->openings); 2917 } 2918 } 2919 } 2920 2921 mtx_lock(&dev->sim->devq->send_mtx); 2922 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2923 2924 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2925 2926 /* 2927 * Just extend the old timeout and decrement 2928 * the freeze count so that a single timeout 2929 * is sufficient for releasing the queue. 2930 */ 2931 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2932 callout_stop(&dev->callout); 2933 } else { 2934 2935 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2936 } 2937 2938 callout_reset_sbt(&dev->callout, 2939 SBT_1MS * crs->release_timeout, 0, 2940 xpt_release_devq_timeout, dev, 0); 2941 2942 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2943 2944 } 2945 2946 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2947 2948 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2949 /* 2950 * Decrement the freeze count so that a single 2951 * completion is still sufficient to unfreeze 2952 * the queue. 
2953 */ 2954 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2955 } else { 2956 2957 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2958 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2959 } 2960 } 2961 2962 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2963 2964 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2965 || (dev->ccbq.dev_active == 0)) { 2966 2967 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2968 } else { 2969 2970 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2971 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2972 } 2973 } 2974 mtx_unlock(&dev->sim->devq->send_mtx); 2975 2976 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2977 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2978 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2979 start_ccb->ccb_h.status = CAM_REQ_CMP; 2980 break; 2981 } 2982 case XPT_DEBUG: { 2983 struct cam_path *oldpath; 2984 2985 /* Check that all request bits are supported. */ 2986 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 2987 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2988 break; 2989 } 2990 2991 cam_dflags = CAM_DEBUG_NONE; 2992 if (cam_dpath != NULL) { 2993 oldpath = cam_dpath; 2994 cam_dpath = NULL; 2995 xpt_free_path(oldpath); 2996 } 2997 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 2998 if (xpt_create_path(&cam_dpath, NULL, 2999 start_ccb->ccb_h.path_id, 3000 start_ccb->ccb_h.target_id, 3001 start_ccb->ccb_h.target_lun) != 3002 CAM_REQ_CMP) { 3003 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3004 } else { 3005 cam_dflags = start_ccb->cdbg.flags; 3006 start_ccb->ccb_h.status = CAM_REQ_CMP; 3007 xpt_print(cam_dpath, "debugging flags now %x\n", 3008 cam_dflags); 3009 } 3010 } else 3011 start_ccb->ccb_h.status = CAM_REQ_CMP; 3012 break; 3013 } 3014 case XPT_NOOP: 3015 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3016 xpt_freeze_devq(path, 1); 3017 start_ccb->ccb_h.status = CAM_REQ_CMP; 3018 break; 3019 case XPT_REPROBE_LUN: 3020 xpt_async(AC_INQ_CHANGED, path, NULL); 3021 start_ccb->ccb_h.status = CAM_REQ_CMP; 3022 xpt_done(start_ccb); 3023 break; 3024 default: 3025 case XPT_SDEV_TYPE: 3026 case XPT_TERM_IO: 3027 case XPT_ENG_INQ: 3028 /* XXX Implement */ 3029 xpt_print_path(start_ccb->ccb_h.path); 3030 printf("%s: CCB type %#x %s not supported\n", __func__, 3031 start_ccb->ccb_h.func_code, 3032 xpt_action_name(start_ccb->ccb_h.func_code)); 3033 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3034 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3035 xpt_done(start_ccb); 3036 } 3037 break; 3038 } 3039 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3040 ("xpt_action_default: func= %#x %s status %#x\n", 3041 start_ccb->ccb_h.func_code, 3042 xpt_action_name(start_ccb->ccb_h.func_code), 3043 start_ccb->ccb_h.status)); 3044 } 3045 3046 void 3047 xpt_polled_action(union ccb *start_ccb) 3048 { 3049 u_int32_t timeout; 3050 struct cam_sim *sim; 3051 struct cam_devq *devq; 3052 struct cam_ed *dev; 3053 3054 timeout = start_ccb->ccb_h.timeout * 10; 3055 sim = start_ccb->ccb_h.path->bus->sim; 3056 devq = sim->devq; 3057 dev = start_ccb->ccb_h.path->device; 3058 3059 mtx_unlock(&dev->device_mtx); 3060 3061 /* 3062 * Steal an opening so that no other queued requests 3063 * can get it before us while we simulate interrupts. 
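 *
 * Illustrative use of this routine only, loosely modeled on how
 * peripheral dump routines issue I/O with interrupts disabled (the CDB
 * setup and timeout below are placeholders for the example):
 *
 *	struct ccb_scsiio csio;
 *
 *	xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *	csio.ccb_h.timeout = 60 * 1000;
 *	(fill in the CDB, data pointer and direction flags)
 *	xpt_polled_action((union ccb *)&csio);
 *	if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
 *		(the request failed or timed out; see CAM_CMD_TIMEOUT below)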
3064 */ 3065 mtx_lock(&devq->send_mtx); 3066 dev->ccbq.dev_openings--; 3067 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3068 (--timeout > 0)) { 3069 mtx_unlock(&devq->send_mtx); 3070 DELAY(100); 3071 CAM_SIM_LOCK(sim); 3072 (*(sim->sim_poll))(sim); 3073 CAM_SIM_UNLOCK(sim); 3074 camisr_runqueue(); 3075 mtx_lock(&devq->send_mtx); 3076 } 3077 dev->ccbq.dev_openings++; 3078 mtx_unlock(&devq->send_mtx); 3079 3080 if (timeout != 0) { 3081 xpt_action(start_ccb); 3082 while(--timeout > 0) { 3083 CAM_SIM_LOCK(sim); 3084 (*(sim->sim_poll))(sim); 3085 CAM_SIM_UNLOCK(sim); 3086 camisr_runqueue(); 3087 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3088 != CAM_REQ_INPROG) 3089 break; 3090 DELAY(100); 3091 } 3092 if (timeout == 0) { 3093 /* 3094 * XXX Is it worth adding a sim_timeout entry 3095 * point so we can attempt recovery? If 3096 * this is only used for dumps, I don't think 3097 * it is. 3098 */ 3099 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3100 } 3101 } else { 3102 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3103 } 3104 3105 mtx_lock(&dev->device_mtx); 3106 } 3107 3108 /* 3109 * Schedule a peripheral driver to receive a ccb when its 3110 * target device has space for more transactions. 3111 */ 3112 void 3113 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3114 { 3115 3116 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3117 cam_periph_assert(periph, MA_OWNED); 3118 if (new_priority < periph->scheduled_priority) { 3119 periph->scheduled_priority = new_priority; 3120 xpt_run_allocq(periph, 0); 3121 } 3122 } 3123 3124 3125 /* 3126 * Schedule a device to run on a given queue. 3127 * If the device was inserted as a new entry on the queue, 3128 * return 1 meaning the device queue should be run. If we 3129 * were already queued, implying someone else has already 3130 * started the queue, return 0 so the caller doesn't attempt 3131 * to run the queue. 3132 */ 3133 static int 3134 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3135 u_int32_t new_priority) 3136 { 3137 int retval; 3138 u_int32_t old_priority; 3139 3140 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3141 3142 old_priority = pinfo->priority; 3143 3144 /* 3145 * Are we already queued? 
3146 */ 3147 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3148 /* Simply reorder based on new priority */ 3149 if (new_priority < old_priority) { 3150 camq_change_priority(queue, pinfo->index, 3151 new_priority); 3152 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3153 ("changed priority to %d\n", 3154 new_priority)); 3155 retval = 1; 3156 } else 3157 retval = 0; 3158 } else { 3159 /* New entry on the queue */ 3160 if (new_priority < old_priority) 3161 pinfo->priority = new_priority; 3162 3163 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3164 ("Inserting onto queue\n")); 3165 pinfo->generation = ++queue->generation; 3166 camq_insert(queue, pinfo); 3167 retval = 1; 3168 } 3169 return (retval); 3170 } 3171 3172 static void 3173 xpt_run_allocq_task(void *context, int pending) 3174 { 3175 struct cam_periph *periph = context; 3176 3177 cam_periph_lock(periph); 3178 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3179 xpt_run_allocq(periph, 1); 3180 cam_periph_unlock(periph); 3181 cam_periph_release(periph); 3182 } 3183 3184 static void 3185 xpt_run_allocq(struct cam_periph *periph, int sleep) 3186 { 3187 struct cam_ed *device; 3188 union ccb *ccb; 3189 uint32_t prio; 3190 3191 cam_periph_assert(periph, MA_OWNED); 3192 if (periph->periph_allocating) 3193 return; 3194 periph->periph_allocating = 1; 3195 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3196 device = periph->path->device; 3197 ccb = NULL; 3198 restart: 3199 while ((prio = min(periph->scheduled_priority, 3200 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3201 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3202 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3203 3204 if (ccb == NULL && 3205 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3206 if (sleep) { 3207 ccb = xpt_get_ccb(periph); 3208 goto restart; 3209 } 3210 if (periph->flags & CAM_PERIPH_RUN_TASK) 3211 break; 3212 cam_periph_doacquire(periph); 3213 periph->flags |= CAM_PERIPH_RUN_TASK; 3214 taskqueue_enqueue(xsoftc.xpt_taskq, 3215 &periph->periph_run_task); 3216 break; 3217 } 3218 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3219 if (prio == periph->immediate_priority) { 3220 periph->immediate_priority = CAM_PRIORITY_NONE; 3221 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3222 ("waking cam_periph_getccb()\n")); 3223 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3224 periph_links.sle); 3225 wakeup(&periph->ccb_list); 3226 } else { 3227 periph->scheduled_priority = CAM_PRIORITY_NONE; 3228 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3229 ("calling periph_start()\n")); 3230 periph->periph_start(periph, ccb); 3231 } 3232 ccb = NULL; 3233 } 3234 if (ccb != NULL) 3235 xpt_release_ccb(ccb); 3236 periph->periph_allocating = 0; 3237 } 3238 3239 static void 3240 xpt_run_devq(struct cam_devq *devq) 3241 { 3242 int lock; 3243 3244 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3245 3246 devq->send_queue.qfrozen_cnt++; 3247 while ((devq->send_queue.entries > 0) 3248 && (devq->send_openings > 0) 3249 && (devq->send_queue.qfrozen_cnt <= 1)) { 3250 struct cam_ed *device; 3251 union ccb *work_ccb; 3252 struct cam_sim *sim; 3253 struct xpt_proto *proto; 3254 3255 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3256 CAMQ_HEAD); 3257 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3258 ("running device %p\n", device)); 3259 3260 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3261 if (work_ccb == NULL) { 3262 printf("device on run queue with no ccbs???\n"); 3263 continue; 3264 } 3265 3266 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3267 3268 mtx_lock(&xsoftc.xpt_highpower_lock); 3269 if 
(xsoftc.num_highpower <= 0) { 3270 /* 3271 * We got a high power command, but we 3272 * don't have any available slots. Freeze 3273 * the device queue until we have a slot 3274 * available. 3275 */ 3276 xpt_freeze_devq_device(device, 1); 3277 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3278 highpowerq_entry); 3279 3280 mtx_unlock(&xsoftc.xpt_highpower_lock); 3281 continue; 3282 } else { 3283 /* 3284 * Consume a high power slot while 3285 * this ccb runs. 3286 */ 3287 xsoftc.num_highpower--; 3288 } 3289 mtx_unlock(&xsoftc.xpt_highpower_lock); 3290 } 3291 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3292 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3293 devq->send_openings--; 3294 devq->send_active++; 3295 xpt_schedule_devq(devq, device); 3296 mtx_unlock(&devq->send_mtx); 3297 3298 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3299 /* 3300 * The client wants to freeze the queue 3301 * after this CCB is sent. 3302 */ 3303 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3304 } 3305 3306 /* In Target mode, the peripheral driver knows best... */ 3307 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3308 if ((device->inq_flags & SID_CmdQue) != 0 3309 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3310 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3311 else 3312 /* 3313 * Clear this in case of a retried CCB that 3314 * failed due to a rejected tag. 3315 */ 3316 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3317 } 3318 3319 KASSERT(device == work_ccb->ccb_h.path->device, 3320 ("device (%p) / path->device (%p) mismatch", 3321 device, work_ccb->ccb_h.path->device)); 3322 proto = xpt_proto_find(device->protocol); 3323 if (proto && proto->ops->debug_out) 3324 proto->ops->debug_out(work_ccb); 3325 3326 /* 3327 * Device queues can be shared among multiple SIM instances 3328 * that reside on different busses. Use the SIM from the 3329 * queued device, rather than the one from the calling bus. 3330 */ 3331 sim = device->sim; 3332 lock = (mtx_owned(sim->mtx) == 0); 3333 if (lock) 3334 CAM_SIM_LOCK(sim); 3335 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // xxx uintprt_t too small 32bit platforms 3336 (*(sim->sim_action))(sim, work_ccb); 3337 if (lock) 3338 CAM_SIM_UNLOCK(sim); 3339 mtx_lock(&devq->send_mtx); 3340 } 3341 devq->send_queue.qfrozen_cnt--; 3342 } 3343 3344 /* 3345 * This function merges stuff from the slave ccb into the master ccb, while 3346 * keeping important fields in the master ccb constant. 3347 */ 3348 void 3349 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3350 { 3351 3352 /* 3353 * Pull fields that are valid for peripheral drivers to set 3354 * into the master CCB along with the CCB "payload". 
3355 */ 3356 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3357 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3358 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3359 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3360 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3361 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3362 } 3363 3364 void 3365 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3366 u_int32_t priority, u_int32_t flags) 3367 { 3368 3369 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3370 ccb_h->pinfo.priority = priority; 3371 ccb_h->path = path; 3372 ccb_h->path_id = path->bus->path_id; 3373 if (path->target) 3374 ccb_h->target_id = path->target->target_id; 3375 else 3376 ccb_h->target_id = CAM_TARGET_WILDCARD; 3377 if (path->device) { 3378 ccb_h->target_lun = path->device->lun_id; 3379 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3380 } else { 3381 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3382 } 3383 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3384 ccb_h->flags = flags; 3385 ccb_h->xflags = 0; 3386 } 3387 3388 void 3389 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3390 { 3391 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3392 } 3393 3394 /* Path manipulation functions */ 3395 cam_status 3396 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3397 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3398 { 3399 struct cam_path *path; 3400 cam_status status; 3401 3402 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3403 3404 if (path == NULL) { 3405 status = CAM_RESRC_UNAVAIL; 3406 return(status); 3407 } 3408 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3409 if (status != CAM_REQ_CMP) { 3410 free(path, M_CAMPATH); 3411 path = NULL; 3412 } 3413 *new_path_ptr = path; 3414 return (status); 3415 } 3416 3417 cam_status 3418 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3419 struct cam_periph *periph, path_id_t path_id, 3420 target_id_t target_id, lun_id_t lun_id) 3421 { 3422 3423 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3424 lun_id)); 3425 } 3426 3427 cam_status 3428 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3429 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3430 { 3431 struct cam_eb *bus; 3432 struct cam_et *target; 3433 struct cam_ed *device; 3434 cam_status status; 3435 3436 status = CAM_REQ_CMP; /* Completed without error */ 3437 target = NULL; /* Wildcarded */ 3438 device = NULL; /* Wildcarded */ 3439 3440 /* 3441 * We will potentially modify the EDT, so block interrupts 3442 * that may attempt to create cam paths. 
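 *
 * Note that most callers do not invoke this routine directly; they use
 * the xpt_create_path() wrapper above, which allocates the path as well.
 * A minimal sketch of that usage (the IDs are placeholders, error
 * handling reduced to the essential check):
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return;
 *	(use the path with xpt_setup_ccb()/xpt_action(), xpt_print(), ...)
 *	xpt_free_path(path);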
3443 */ 3444 bus = xpt_find_bus(path_id); 3445 if (bus == NULL) { 3446 status = CAM_PATH_INVALID; 3447 } else { 3448 xpt_lock_buses(); 3449 mtx_lock(&bus->eb_mtx); 3450 target = xpt_find_target(bus, target_id); 3451 if (target == NULL) { 3452 /* Create one */ 3453 struct cam_et *new_target; 3454 3455 new_target = xpt_alloc_target(bus, target_id); 3456 if (new_target == NULL) { 3457 status = CAM_RESRC_UNAVAIL; 3458 } else { 3459 target = new_target; 3460 } 3461 } 3462 xpt_unlock_buses(); 3463 if (target != NULL) { 3464 device = xpt_find_device(target, lun_id); 3465 if (device == NULL) { 3466 /* Create one */ 3467 struct cam_ed *new_device; 3468 3469 new_device = 3470 (*(bus->xport->ops->alloc_device))(bus, 3471 target, 3472 lun_id); 3473 if (new_device == NULL) { 3474 status = CAM_RESRC_UNAVAIL; 3475 } else { 3476 device = new_device; 3477 } 3478 } 3479 } 3480 mtx_unlock(&bus->eb_mtx); 3481 } 3482 3483 /* 3484 * Only touch the user's data if we are successful. 3485 */ 3486 if (status == CAM_REQ_CMP) { 3487 new_path->periph = perph; 3488 new_path->bus = bus; 3489 new_path->target = target; 3490 new_path->device = device; 3491 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3492 } else { 3493 if (device != NULL) 3494 xpt_release_device(device); 3495 if (target != NULL) 3496 xpt_release_target(target); 3497 if (bus != NULL) 3498 xpt_release_bus(bus); 3499 } 3500 return (status); 3501 } 3502 3503 cam_status 3504 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3505 { 3506 struct cam_path *new_path; 3507 3508 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3509 if (new_path == NULL) 3510 return(CAM_RESRC_UNAVAIL); 3511 xpt_copy_path(new_path, path); 3512 *new_path_ptr = new_path; 3513 return (CAM_REQ_CMP); 3514 } 3515 3516 void 3517 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3518 { 3519 3520 *new_path = *path; 3521 if (path->bus != NULL) 3522 xpt_acquire_bus(path->bus); 3523 if (path->target != NULL) 3524 xpt_acquire_target(path->target); 3525 if (path->device != NULL) 3526 xpt_acquire_device(path->device); 3527 } 3528 3529 void 3530 xpt_release_path(struct cam_path *path) 3531 { 3532 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3533 if (path->device != NULL) { 3534 xpt_release_device(path->device); 3535 path->device = NULL; 3536 } 3537 if (path->target != NULL) { 3538 xpt_release_target(path->target); 3539 path->target = NULL; 3540 } 3541 if (path->bus != NULL) { 3542 xpt_release_bus(path->bus); 3543 path->bus = NULL; 3544 } 3545 } 3546 3547 void 3548 xpt_free_path(struct cam_path *path) 3549 { 3550 3551 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3552 xpt_release_path(path); 3553 free(path, M_CAMPATH); 3554 } 3555 3556 void 3557 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3558 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3559 { 3560 3561 xpt_lock_buses(); 3562 if (bus_ref) { 3563 if (path->bus) 3564 *bus_ref = path->bus->refcount; 3565 else 3566 *bus_ref = 0; 3567 } 3568 if (periph_ref) { 3569 if (path->periph) 3570 *periph_ref = path->periph->refcount; 3571 else 3572 *periph_ref = 0; 3573 } 3574 xpt_unlock_buses(); 3575 if (target_ref) { 3576 if (path->target) 3577 *target_ref = path->target->refcount; 3578 else 3579 *target_ref = 0; 3580 } 3581 if (device_ref) { 3582 if (path->device) 3583 *device_ref = path->device->refcount; 3584 else 3585 *device_ref = 0; 3586 } 3587 } 3588 3589 /* 3590 * Return -1 for failure, 0 for exact match, 1 for match with 
wildcards 3591 * in path1, 2 for match with wildcards in path2. 3592 */ 3593 int 3594 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3595 { 3596 int retval = 0; 3597 3598 if (path1->bus != path2->bus) { 3599 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3600 retval = 1; 3601 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3602 retval = 2; 3603 else 3604 return (-1); 3605 } 3606 if (path1->target != path2->target) { 3607 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3608 if (retval == 0) 3609 retval = 1; 3610 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3611 retval = 2; 3612 else 3613 return (-1); 3614 } 3615 if (path1->device != path2->device) { 3616 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3617 if (retval == 0) 3618 retval = 1; 3619 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3620 retval = 2; 3621 else 3622 return (-1); 3623 } 3624 return (retval); 3625 } 3626 3627 int 3628 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3629 { 3630 int retval = 0; 3631 3632 if (path->bus != dev->target->bus) { 3633 if (path->bus->path_id == CAM_BUS_WILDCARD) 3634 retval = 1; 3635 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3636 retval = 2; 3637 else 3638 return (-1); 3639 } 3640 if (path->target != dev->target) { 3641 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3642 if (retval == 0) 3643 retval = 1; 3644 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3645 retval = 2; 3646 else 3647 return (-1); 3648 } 3649 if (path->device != dev) { 3650 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3651 if (retval == 0) 3652 retval = 1; 3653 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3654 retval = 2; 3655 else 3656 return (-1); 3657 } 3658 return (retval); 3659 } 3660 3661 void 3662 xpt_print_path(struct cam_path *path) 3663 { 3664 3665 if (path == NULL) 3666 printf("(nopath): "); 3667 else { 3668 if (path->periph != NULL) 3669 printf("(%s%d:", path->periph->periph_name, 3670 path->periph->unit_number); 3671 else 3672 printf("(noperiph:"); 3673 3674 if (path->bus != NULL) 3675 printf("%s%d:%d:", path->bus->sim->sim_name, 3676 path->bus->sim->unit_number, 3677 path->bus->sim->bus_id); 3678 else 3679 printf("nobus:"); 3680 3681 if (path->target != NULL) 3682 printf("%d:", path->target->target_id); 3683 else 3684 printf("X:"); 3685 3686 if (path->device != NULL) 3687 printf("%jx): ", (uintmax_t)path->device->lun_id); 3688 else 3689 printf("X): "); 3690 } 3691 } 3692 3693 void 3694 xpt_print_device(struct cam_ed *device) 3695 { 3696 3697 if (device == NULL) 3698 printf("(nopath): "); 3699 else { 3700 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3701 device->sim->unit_number, 3702 device->sim->bus_id, 3703 device->target->target_id, 3704 (uintmax_t)device->lun_id); 3705 } 3706 } 3707 3708 void 3709 xpt_print(struct cam_path *path, const char *fmt, ...) 
3710 { 3711 va_list ap; 3712 xpt_print_path(path); 3713 va_start(ap, fmt); 3714 vprintf(fmt, ap); 3715 va_end(ap); 3716 } 3717 3718 int 3719 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3720 { 3721 struct sbuf sb; 3722 3723 sbuf_new(&sb, str, str_len, 0); 3724 3725 if (path == NULL) 3726 sbuf_printf(&sb, "(nopath): "); 3727 else { 3728 if (path->periph != NULL) 3729 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, 3730 path->periph->unit_number); 3731 else 3732 sbuf_printf(&sb, "(noperiph:"); 3733 3734 if (path->bus != NULL) 3735 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, 3736 path->bus->sim->unit_number, 3737 path->bus->sim->bus_id); 3738 else 3739 sbuf_printf(&sb, "nobus:"); 3740 3741 if (path->target != NULL) 3742 sbuf_printf(&sb, "%d:", path->target->target_id); 3743 else 3744 sbuf_printf(&sb, "X:"); 3745 3746 if (path->device != NULL) 3747 sbuf_printf(&sb, "%jx): ", 3748 (uintmax_t)path->device->lun_id); 3749 else 3750 sbuf_printf(&sb, "X): "); 3751 } 3752 sbuf_finish(&sb); 3753 3754 return(sbuf_len(&sb)); 3755 } 3756 3757 path_id_t 3758 xpt_path_path_id(struct cam_path *path) 3759 { 3760 return(path->bus->path_id); 3761 } 3762 3763 target_id_t 3764 xpt_path_target_id(struct cam_path *path) 3765 { 3766 if (path->target != NULL) 3767 return (path->target->target_id); 3768 else 3769 return (CAM_TARGET_WILDCARD); 3770 } 3771 3772 lun_id_t 3773 xpt_path_lun_id(struct cam_path *path) 3774 { 3775 if (path->device != NULL) 3776 return (path->device->lun_id); 3777 else 3778 return (CAM_LUN_WILDCARD); 3779 } 3780 3781 struct cam_sim * 3782 xpt_path_sim(struct cam_path *path) 3783 { 3784 3785 return (path->bus->sim); 3786 } 3787 3788 struct cam_periph* 3789 xpt_path_periph(struct cam_path *path) 3790 { 3791 3792 return (path->periph); 3793 } 3794 3795 /* 3796 * Release a CAM control block for the caller. Remit the cost of the structure 3797 * to the device referenced by the path. If the this device had no 'credits' 3798 * and peripheral drivers have registered async callbacks for this notification 3799 * call them now. 3800 */ 3801 void 3802 xpt_release_ccb(union ccb *free_ccb) 3803 { 3804 struct cam_ed *device; 3805 struct cam_periph *periph; 3806 3807 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3808 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3809 device = free_ccb->ccb_h.path->device; 3810 periph = free_ccb->ccb_h.path->periph; 3811 3812 xpt_free_ccb(free_ccb); 3813 periph->periph_allocated--; 3814 cam_ccbq_release_opening(&device->ccbq); 3815 xpt_run_allocq(periph, 0); 3816 } 3817 3818 /* Functions accessed by SIM drivers */ 3819 3820 static struct xpt_xport_ops xport_default_ops = { 3821 .alloc_device = xpt_alloc_device_default, 3822 .action = xpt_action_default, 3823 .async = xpt_dev_async_default, 3824 }; 3825 static struct xpt_xport xport_default = { 3826 .xport = XPORT_UNKNOWN, 3827 .name = "unknown", 3828 .ops = &xport_default_ops, 3829 }; 3830 3831 CAM_XPT_XPORT(xport_default); 3832 3833 /* 3834 * A sim structure, listing the SIM entry points and instance 3835 * identification info is passed to xpt_bus_register to hook the SIM 3836 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3837 * for this new bus and places it in the array of busses and assigns 3838 * it a path_id. The path_id may be influenced by "hard wiring" 3839 * information specified by the user. Once interrupt services are 3840 * available, the bus will be probed. 
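 *
 * As a hedged sketch of the other side of this interface, a SIM driver's
 * attach routine typically ends with something like the following (the
 * queue depth, the "xx" names and the softc layout are placeholders):
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(xxaction, xxpoll, "xx", softc,
 *	    device_get_unit(dev), &softc->mtx,
 *	    1, MAX_TRANSACTIONS, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		(tear the SIM down again)
 *	mtx_unlock(&softc->mtx);
 *
 * xpt_bus_register() expects the SIM lock to be held, as the assertion
 * at the top of the function shows.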
3841 */ 3842 int32_t 3843 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3844 { 3845 struct cam_eb *new_bus; 3846 struct cam_eb *old_bus; 3847 struct ccb_pathinq cpi; 3848 struct cam_path *path; 3849 cam_status status; 3850 3851 mtx_assert(sim->mtx, MA_OWNED); 3852 3853 sim->bus_id = bus; 3854 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3855 M_CAMXPT, M_NOWAIT|M_ZERO); 3856 if (new_bus == NULL) { 3857 /* Couldn't satisfy request */ 3858 return (CAM_RESRC_UNAVAIL); 3859 } 3860 3861 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3862 TAILQ_INIT(&new_bus->et_entries); 3863 cam_sim_hold(sim); 3864 new_bus->sim = sim; 3865 timevalclear(&new_bus->last_reset); 3866 new_bus->flags = 0; 3867 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3868 new_bus->generation = 0; 3869 3870 xpt_lock_buses(); 3871 sim->path_id = new_bus->path_id = 3872 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3873 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3874 while (old_bus != NULL 3875 && old_bus->path_id < new_bus->path_id) 3876 old_bus = TAILQ_NEXT(old_bus, links); 3877 if (old_bus != NULL) 3878 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3879 else 3880 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3881 xsoftc.bus_generation++; 3882 xpt_unlock_buses(); 3883 3884 /* 3885 * Set a default transport so that a PATH_INQ can be issued to 3886 * the SIM. This will then allow for probing and attaching of 3887 * a more appropriate transport. 3888 */ 3889 new_bus->xport = &xport_default; 3890 3891 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3892 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3893 if (status != CAM_REQ_CMP) { 3894 xpt_release_bus(new_bus); 3895 free(path, M_CAMXPT); 3896 return (CAM_RESRC_UNAVAIL); 3897 } 3898 3899 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3900 cpi.ccb_h.func_code = XPT_PATH_INQ; 3901 xpt_action((union ccb *)&cpi); 3902 3903 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3904 struct xpt_xport **xpt; 3905 3906 SET_FOREACH(xpt, cam_xpt_xport_set) { 3907 if ((*xpt)->xport == cpi.transport) { 3908 new_bus->xport = *xpt; 3909 break; 3910 } 3911 } 3912 if (new_bus->xport == NULL) { 3913 xpt_print_path(path); 3914 printf("No transport found for %d\n", cpi.transport); 3915 xpt_release_bus(new_bus); 3916 free(path, M_CAMXPT); 3917 return (CAM_RESRC_UNAVAIL); 3918 } 3919 } 3920 3921 /* Notify interested parties */ 3922 if (sim->path_id != CAM_XPT_PATH_ID) { 3923 3924 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3925 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3926 union ccb *scan_ccb; 3927 3928 /* Initiate bus rescan. */ 3929 scan_ccb = xpt_alloc_ccb_nowait(); 3930 if (scan_ccb != NULL) { 3931 scan_ccb->ccb_h.path = path; 3932 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3933 scan_ccb->crcn.flags = 0; 3934 xpt_rescan(scan_ccb); 3935 } else { 3936 xpt_print(path, 3937 "Can't allocate CCB to scan bus\n"); 3938 xpt_free_path(path); 3939 } 3940 } else 3941 xpt_free_path(path); 3942 } else 3943 xpt_free_path(path); 3944 return (CAM_SUCCESS); 3945 } 3946 3947 int32_t 3948 xpt_bus_deregister(path_id_t pathid) 3949 { 3950 struct cam_path bus_path; 3951 cam_status status; 3952 3953 status = xpt_compile_path(&bus_path, NULL, pathid, 3954 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3955 if (status != CAM_REQ_CMP) 3956 return (status); 3957 3958 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3959 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3960 3961 /* Release the reference count held while registered. 
*/ 3962 xpt_release_bus(bus_path.bus); 3963 xpt_release_path(&bus_path); 3964 3965 return (CAM_REQ_CMP); 3966 } 3967 3968 static path_id_t 3969 xptnextfreepathid(void) 3970 { 3971 struct cam_eb *bus; 3972 path_id_t pathid; 3973 const char *strval; 3974 3975 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 3976 pathid = 0; 3977 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3978 retry: 3979 /* Find an unoccupied pathid */ 3980 while (bus != NULL && bus->path_id <= pathid) { 3981 if (bus->path_id == pathid) 3982 pathid++; 3983 bus = TAILQ_NEXT(bus, links); 3984 } 3985 3986 /* 3987 * Ensure that this pathid is not reserved for 3988 * a bus that may be registered in the future. 3989 */ 3990 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 3991 ++pathid; 3992 /* Start the search over */ 3993 goto retry; 3994 } 3995 return (pathid); 3996 } 3997 3998 static path_id_t 3999 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4000 { 4001 path_id_t pathid; 4002 int i, dunit, val; 4003 char buf[32]; 4004 const char *dname; 4005 4006 pathid = CAM_XPT_PATH_ID; 4007 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4008 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4009 return (pathid); 4010 i = 0; 4011 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4012 if (strcmp(dname, "scbus")) { 4013 /* Avoid a bit of foot shooting. */ 4014 continue; 4015 } 4016 if (dunit < 0) /* unwired?! */ 4017 continue; 4018 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4019 if (sim_bus == val) { 4020 pathid = dunit; 4021 break; 4022 } 4023 } else if (sim_bus == 0) { 4024 /* Unspecified matches bus 0 */ 4025 pathid = dunit; 4026 break; 4027 } else { 4028 printf("Ambiguous scbus configuration for %s%d " 4029 "bus %d, cannot wire down. The kernel " 4030 "config entry for scbus%d should " 4031 "specify a controller bus.\n" 4032 "Scbus will be assigned dynamically.\n", 4033 sim_name, sim_unit, sim_bus, dunit); 4034 break; 4035 } 4036 } 4037 4038 if (pathid == CAM_XPT_PATH_ID) 4039 pathid = xptnextfreepathid(); 4040 return (pathid); 4041 } 4042 4043 static const char * 4044 xpt_async_string(u_int32_t async_code) 4045 { 4046 4047 switch (async_code) { 4048 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4049 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4050 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4051 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4052 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4053 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4054 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4055 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4056 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4057 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4058 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4059 case AC_CONTRACT: return ("AC_CONTRACT"); 4060 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4061 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4062 } 4063 return ("AC_UNKNOWN"); 4064 } 4065 4066 static int 4067 xpt_async_size(u_int32_t async_code) 4068 { 4069 4070 switch (async_code) { 4071 case AC_BUS_RESET: return (0); 4072 case AC_UNSOL_RESEL: return (0); 4073 case AC_SCSI_AEN: return (0); 4074 case AC_SENT_BDR: return (0); 4075 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4076 case AC_PATH_DEREGISTERED: return (0); 4077 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4078 case AC_LOST_DEVICE: return (0); 4079 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4080 case 
AC_INQ_CHANGED: return (0); 4081 case AC_GETDEV_CHANGED: return (0); 4082 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4083 case AC_ADVINFO_CHANGED: return (-1); 4084 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4085 } 4086 return (0); 4087 } 4088 4089 static int 4090 xpt_async_process_dev(struct cam_ed *device, void *arg) 4091 { 4092 union ccb *ccb = arg; 4093 struct cam_path *path = ccb->ccb_h.path; 4094 void *async_arg = ccb->casync.async_arg_ptr; 4095 u_int32_t async_code = ccb->casync.async_code; 4096 int relock; 4097 4098 if (path->device != device 4099 && path->device->lun_id != CAM_LUN_WILDCARD 4100 && device->lun_id != CAM_LUN_WILDCARD) 4101 return (1); 4102 4103 /* 4104 * The async callback could free the device. 4105 * If it is a broadcast async, it doesn't hold 4106 * device reference, so take our own reference. 4107 */ 4108 xpt_acquire_device(device); 4109 4110 /* 4111 * If async for specific device is to be delivered to 4112 * the wildcard client, take the specific device lock. 4113 * XXX: We may need a way for client to specify it. 4114 */ 4115 if ((device->lun_id == CAM_LUN_WILDCARD && 4116 path->device->lun_id != CAM_LUN_WILDCARD) || 4117 (device->target->target_id == CAM_TARGET_WILDCARD && 4118 path->target->target_id != CAM_TARGET_WILDCARD) || 4119 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4120 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4121 mtx_unlock(&device->device_mtx); 4122 xpt_path_lock(path); 4123 relock = 1; 4124 } else 4125 relock = 0; 4126 4127 (*(device->target->bus->xport->ops->async))(async_code, 4128 device->target->bus, device->target, device, async_arg); 4129 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4130 4131 if (relock) { 4132 xpt_path_unlock(path); 4133 mtx_lock(&device->device_mtx); 4134 } 4135 xpt_release_device(device); 4136 return (1); 4137 } 4138 4139 static int 4140 xpt_async_process_tgt(struct cam_et *target, void *arg) 4141 { 4142 union ccb *ccb = arg; 4143 struct cam_path *path = ccb->ccb_h.path; 4144 4145 if (path->target != target 4146 && path->target->target_id != CAM_TARGET_WILDCARD 4147 && target->target_id != CAM_TARGET_WILDCARD) 4148 return (1); 4149 4150 if (ccb->casync.async_code == AC_SENT_BDR) { 4151 /* Update our notion of when the last reset occurred */ 4152 microtime(&target->last_reset); 4153 } 4154 4155 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4156 } 4157 4158 static void 4159 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4160 { 4161 struct cam_eb *bus; 4162 struct cam_path *path; 4163 void *async_arg; 4164 u_int32_t async_code; 4165 4166 path = ccb->ccb_h.path; 4167 async_code = ccb->casync.async_code; 4168 async_arg = ccb->casync.async_arg_ptr; 4169 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4170 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4171 bus = path->bus; 4172 4173 if (async_code == AC_BUS_RESET) { 4174 /* Update our notion of when the last reset occurred */ 4175 microtime(&bus->last_reset); 4176 } 4177 4178 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4179 4180 /* 4181 * If this wasn't a fully wildcarded async, tell all 4182 * clients that want all async events. 
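 * Those clients are the peripherals that registered their callbacks on
 * the fully wildcarded path at CAM_XPT_PATH_ID (the path xpt_periph
 * itself sits on), which is what xpt_register_async() below builds when
 * it is handed a NULL path.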
4183 */ 4184 if (bus != xpt_periph->path->bus) { 4185 xpt_path_lock(xpt_periph->path); 4186 xpt_async_process_dev(xpt_periph->path->device, ccb); 4187 xpt_path_unlock(xpt_periph->path); 4188 } 4189 4190 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4191 xpt_release_devq(path, 1, TRUE); 4192 else 4193 xpt_release_simq(path->bus->sim, TRUE); 4194 if (ccb->casync.async_arg_size > 0) 4195 free(async_arg, M_CAMXPT); 4196 xpt_free_path(path); 4197 xpt_free_ccb(ccb); 4198 } 4199 4200 static void 4201 xpt_async_bcast(struct async_list *async_head, 4202 u_int32_t async_code, 4203 struct cam_path *path, void *async_arg) 4204 { 4205 struct async_node *cur_entry; 4206 int lock; 4207 4208 cur_entry = SLIST_FIRST(async_head); 4209 while (cur_entry != NULL) { 4210 struct async_node *next_entry; 4211 /* 4212 * Grab the next list entry before we call the current 4213 * entry's callback. This is because the callback function 4214 * can delete its async callback entry. 4215 */ 4216 next_entry = SLIST_NEXT(cur_entry, links); 4217 if ((cur_entry->event_enable & async_code) != 0) { 4218 lock = cur_entry->event_lock; 4219 if (lock) 4220 CAM_SIM_LOCK(path->device->sim); 4221 cur_entry->callback(cur_entry->callback_arg, 4222 async_code, path, 4223 async_arg); 4224 if (lock) 4225 CAM_SIM_UNLOCK(path->device->sim); 4226 } 4227 cur_entry = next_entry; 4228 } 4229 } 4230 4231 void 4232 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4233 { 4234 union ccb *ccb; 4235 int size; 4236 4237 ccb = xpt_alloc_ccb_nowait(); 4238 if (ccb == NULL) { 4239 xpt_print(path, "Can't allocate CCB to send %s\n", 4240 xpt_async_string(async_code)); 4241 return; 4242 } 4243 4244 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4245 xpt_print(path, "Can't allocate path to send %s\n", 4246 xpt_async_string(async_code)); 4247 xpt_free_ccb(ccb); 4248 return; 4249 } 4250 ccb->ccb_h.path->periph = NULL; 4251 ccb->ccb_h.func_code = XPT_ASYNC; 4252 ccb->ccb_h.cbfcnp = xpt_async_process; 4253 ccb->ccb_h.flags |= CAM_UNLOCKED; 4254 ccb->casync.async_code = async_code; 4255 ccb->casync.async_arg_size = 0; 4256 size = xpt_async_size(async_code); 4257 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4258 ("xpt_async: func %#x %s aync_code %d %s\n", 4259 ccb->ccb_h.func_code, 4260 xpt_action_name(ccb->ccb_h.func_code), 4261 async_code, 4262 xpt_async_string(async_code))); 4263 if (size > 0 && async_arg != NULL) { 4264 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4265 if (ccb->casync.async_arg_ptr == NULL) { 4266 xpt_print(path, "Can't allocate argument to send %s\n", 4267 xpt_async_string(async_code)); 4268 xpt_free_path(ccb->ccb_h.path); 4269 xpt_free_ccb(ccb); 4270 return; 4271 } 4272 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4273 ccb->casync.async_arg_size = size; 4274 } else if (size < 0) { 4275 ccb->casync.async_arg_ptr = async_arg; 4276 ccb->casync.async_arg_size = size; 4277 } 4278 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4279 xpt_freeze_devq(path, 1); 4280 else 4281 xpt_freeze_simq(path->bus->sim, 1); 4282 xpt_done(ccb); 4283 } 4284 4285 static void 4286 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4287 struct cam_et *target, struct cam_ed *device, 4288 void *async_arg) 4289 { 4290 4291 /* 4292 * We only need to handle events for real devices. 
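 * Beyond that filtering, this default handler only reports that it was
 * called; transports that actually need to react to async events (the
 * SCSI and ATA transports, for example) install their own async method
 * through struct xpt_xport_ops instead.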
4293 */ 4294 if (target->target_id == CAM_TARGET_WILDCARD 4295 || device->lun_id == CAM_LUN_WILDCARD) 4296 return; 4297 4298 printf("%s called\n", __func__); 4299 } 4300 4301 static uint32_t 4302 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4303 { 4304 struct cam_devq *devq; 4305 uint32_t freeze; 4306 4307 devq = dev->sim->devq; 4308 mtx_assert(&devq->send_mtx, MA_OWNED); 4309 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4310 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4311 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4312 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4313 /* Remove frozen device from sendq. */ 4314 if (device_is_queued(dev)) 4315 camq_remove(&devq->send_queue, dev->devq_entry.index); 4316 return (freeze); 4317 } 4318 4319 u_int32_t 4320 xpt_freeze_devq(struct cam_path *path, u_int count) 4321 { 4322 struct cam_ed *dev = path->device; 4323 struct cam_devq *devq; 4324 uint32_t freeze; 4325 4326 devq = dev->sim->devq; 4327 mtx_lock(&devq->send_mtx); 4328 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4329 freeze = xpt_freeze_devq_device(dev, count); 4330 mtx_unlock(&devq->send_mtx); 4331 return (freeze); 4332 } 4333 4334 u_int32_t 4335 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4336 { 4337 struct cam_devq *devq; 4338 uint32_t freeze; 4339 4340 devq = sim->devq; 4341 mtx_lock(&devq->send_mtx); 4342 freeze = (devq->send_queue.qfrozen_cnt += count); 4343 mtx_unlock(&devq->send_mtx); 4344 return (freeze); 4345 } 4346 4347 static void 4348 xpt_release_devq_timeout(void *arg) 4349 { 4350 struct cam_ed *dev; 4351 struct cam_devq *devq; 4352 4353 dev = (struct cam_ed *)arg; 4354 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4355 devq = dev->sim->devq; 4356 mtx_assert(&devq->send_mtx, MA_OWNED); 4357 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4358 xpt_run_devq(devq); 4359 } 4360 4361 void 4362 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4363 { 4364 struct cam_ed *dev; 4365 struct cam_devq *devq; 4366 4367 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4368 count, run_queue)); 4369 dev = path->device; 4370 devq = dev->sim->devq; 4371 mtx_lock(&devq->send_mtx); 4372 if (xpt_release_devq_device(dev, count, run_queue)) 4373 xpt_run_devq(dev->sim->devq); 4374 mtx_unlock(&devq->send_mtx); 4375 } 4376 4377 static int 4378 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4379 { 4380 4381 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4382 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4383 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4384 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4385 if (count > dev->ccbq.queue.qfrozen_cnt) { 4386 #ifdef INVARIANTS 4387 printf("xpt_release_devq(): requested %u > present %u\n", 4388 count, dev->ccbq.queue.qfrozen_cnt); 4389 #endif 4390 count = dev->ccbq.queue.qfrozen_cnt; 4391 } 4392 dev->ccbq.queue.qfrozen_cnt -= count; 4393 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4394 /* 4395 * No longer need to wait for a successful 4396 * command completion. 4397 */ 4398 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4399 /* 4400 * Remove any timeouts that might be scheduled 4401 * to release this queue. 4402 */ 4403 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4404 callout_stop(&dev->callout); 4405 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4406 } 4407 /* 4408 * Now that we are unfrozen schedule the 4409 * device so any pending transactions are 4410 * run. 
4411 */ 4412 xpt_schedule_devq(dev->sim->devq, dev); 4413 } else 4414 run_queue = 0; 4415 return (run_queue); 4416 } 4417 4418 void 4419 xpt_release_simq(struct cam_sim *sim, int run_queue) 4420 { 4421 struct cam_devq *devq; 4422 4423 devq = sim->devq; 4424 mtx_lock(&devq->send_mtx); 4425 if (devq->send_queue.qfrozen_cnt <= 0) { 4426 #ifdef INVARIANTS 4427 printf("xpt_release_simq: requested 1 > present %u\n", 4428 devq->send_queue.qfrozen_cnt); 4429 #endif 4430 } else 4431 devq->send_queue.qfrozen_cnt--; 4432 if (devq->send_queue.qfrozen_cnt == 0) { 4433 /* 4434 * If there is a timeout scheduled to release this 4435 * sim queue, remove it. The queue frozen count is 4436 * already at 0. 4437 */ 4438 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4439 callout_stop(&sim->callout); 4440 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4441 } 4442 if (run_queue) { 4443 /* 4444 * Now that we are unfrozen run the send queue. 4445 */ 4446 xpt_run_devq(sim->devq); 4447 } 4448 } 4449 mtx_unlock(&devq->send_mtx); 4450 } 4451 4452 /* 4453 * XXX Appears to be unused. 4454 */ 4455 static void 4456 xpt_release_simq_timeout(void *arg) 4457 { 4458 struct cam_sim *sim; 4459 4460 sim = (struct cam_sim *)arg; 4461 xpt_release_simq(sim, /* run_queue */ TRUE); 4462 } 4463 4464 void 4465 xpt_done(union ccb *done_ccb) 4466 { 4467 struct cam_doneq *queue; 4468 int run, hash; 4469 4470 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4471 ("xpt_done: func= %#x %s status %#x\n", 4472 done_ccb->ccb_h.func_code, 4473 xpt_action_name(done_ccb->ccb_h.func_code), 4474 done_ccb->ccb_h.status)); 4475 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4476 return; 4477 4478 /* Store the time the ccb was in the sim */ 4479 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4480 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4481 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4482 queue = &cam_doneqs[hash]; 4483 mtx_lock(&queue->cam_doneq_mtx); 4484 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4485 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4486 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4487 mtx_unlock(&queue->cam_doneq_mtx); 4488 if (run) 4489 wakeup(&queue->cam_doneq); 4490 } 4491 4492 void 4493 xpt_done_direct(union ccb *done_ccb) 4494 { 4495 4496 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4497 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4498 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4499 return; 4500 4501 /* Store the time the ccb was in the sim */ 4502 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4503 xpt_done_process(&done_ccb->ccb_h); 4504 } 4505 4506 union ccb * 4507 xpt_alloc_ccb() 4508 { 4509 union ccb *new_ccb; 4510 4511 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4512 return (new_ccb); 4513 } 4514 4515 union ccb * 4516 xpt_alloc_ccb_nowait() 4517 { 4518 union ccb *new_ccb; 4519 4520 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4521 return (new_ccb); 4522 } 4523 4524 void 4525 xpt_free_ccb(union ccb *free_ccb) 4526 { 4527 free(free_ccb, M_CAMCCB); 4528 } 4529 4530 4531 4532 /* Private XPT functions */ 4533 4534 /* 4535 * Get a CAM control block for the caller. Charge the structure to the device 4536 * referenced by the path. If we don't have sufficient resources to allocate 4537 * more ccbs, we return NULL. 
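 *
 * These allocators are private to the transport layer.  Peripheral
 * drivers normally obtain a CCB through cam_periph_getccb() below,
 * which funnels the allocation here via xpt_run_allocq(), or use
 * xpt_alloc_ccb() directly for CCBs that are not charged to a device.
 * A typical, illustrative call is:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);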
4538 */ 4539 static union ccb * 4540 xpt_get_ccb_nowait(struct cam_periph *periph) 4541 { 4542 union ccb *new_ccb; 4543 4544 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4545 if (new_ccb == NULL) 4546 return (NULL); 4547 periph->periph_allocated++; 4548 cam_ccbq_take_opening(&periph->path->device->ccbq); 4549 return (new_ccb); 4550 } 4551 4552 static union ccb * 4553 xpt_get_ccb(struct cam_periph *periph) 4554 { 4555 union ccb *new_ccb; 4556 4557 cam_periph_unlock(periph); 4558 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4559 cam_periph_lock(periph); 4560 periph->periph_allocated++; 4561 cam_ccbq_take_opening(&periph->path->device->ccbq); 4562 return (new_ccb); 4563 } 4564 4565 union ccb * 4566 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4567 { 4568 struct ccb_hdr *ccb_h; 4569 4570 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4571 cam_periph_assert(periph, MA_OWNED); 4572 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4573 ccb_h->pinfo.priority != priority) { 4574 if (priority < periph->immediate_priority) { 4575 periph->immediate_priority = priority; 4576 xpt_run_allocq(periph, 0); 4577 } else 4578 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4579 "cgticb", 0); 4580 } 4581 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4582 return ((union ccb *)ccb_h); 4583 } 4584 4585 static void 4586 xpt_acquire_bus(struct cam_eb *bus) 4587 { 4588 4589 xpt_lock_buses(); 4590 bus->refcount++; 4591 xpt_unlock_buses(); 4592 } 4593 4594 static void 4595 xpt_release_bus(struct cam_eb *bus) 4596 { 4597 4598 xpt_lock_buses(); 4599 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4600 if (--bus->refcount > 0) { 4601 xpt_unlock_buses(); 4602 return; 4603 } 4604 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4605 xsoftc.bus_generation++; 4606 xpt_unlock_buses(); 4607 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4608 ("destroying bus, but target list is not empty")); 4609 cam_sim_release(bus->sim); 4610 mtx_destroy(&bus->eb_mtx); 4611 free(bus, M_CAMXPT); 4612 } 4613 4614 static struct cam_et * 4615 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4616 { 4617 struct cam_et *cur_target, *target; 4618 4619 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4620 mtx_assert(&bus->eb_mtx, MA_OWNED); 4621 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4622 M_NOWAIT|M_ZERO); 4623 if (target == NULL) 4624 return (NULL); 4625 4626 TAILQ_INIT(&target->ed_entries); 4627 target->bus = bus; 4628 target->target_id = target_id; 4629 target->refcount = 1; 4630 target->generation = 0; 4631 target->luns = NULL; 4632 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4633 timevalclear(&target->last_reset); 4634 /* 4635 * Hold a reference to our parent bus so it 4636 * will not go away before we do. 
4637 */ 4638 bus->refcount++; 4639 4640 /* Insertion sort into our bus's target list */ 4641 cur_target = TAILQ_FIRST(&bus->et_entries); 4642 while (cur_target != NULL && cur_target->target_id < target_id) 4643 cur_target = TAILQ_NEXT(cur_target, links); 4644 if (cur_target != NULL) { 4645 TAILQ_INSERT_BEFORE(cur_target, target, links); 4646 } else { 4647 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4648 } 4649 bus->generation++; 4650 return (target); 4651 } 4652 4653 static void 4654 xpt_acquire_target(struct cam_et *target) 4655 { 4656 struct cam_eb *bus = target->bus; 4657 4658 mtx_lock(&bus->eb_mtx); 4659 target->refcount++; 4660 mtx_unlock(&bus->eb_mtx); 4661 } 4662 4663 static void 4664 xpt_release_target(struct cam_et *target) 4665 { 4666 struct cam_eb *bus = target->bus; 4667 4668 mtx_lock(&bus->eb_mtx); 4669 if (--target->refcount > 0) { 4670 mtx_unlock(&bus->eb_mtx); 4671 return; 4672 } 4673 TAILQ_REMOVE(&bus->et_entries, target, links); 4674 bus->generation++; 4675 mtx_unlock(&bus->eb_mtx); 4676 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4677 ("destroying target, but device list is not empty")); 4678 xpt_release_bus(bus); 4679 mtx_destroy(&target->luns_mtx); 4680 if (target->luns) 4681 free(target->luns, M_CAMXPT); 4682 free(target, M_CAMXPT); 4683 } 4684 4685 static struct cam_ed * 4686 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4687 lun_id_t lun_id) 4688 { 4689 struct cam_ed *device; 4690 4691 device = xpt_alloc_device(bus, target, lun_id); 4692 if (device == NULL) 4693 return (NULL); 4694 4695 device->mintags = 1; 4696 device->maxtags = 1; 4697 return (device); 4698 } 4699 4700 static void 4701 xpt_destroy_device(void *context, int pending) 4702 { 4703 struct cam_ed *device = context; 4704 4705 mtx_lock(&device->device_mtx); 4706 mtx_destroy(&device->device_mtx); 4707 free(device, M_CAMDEV); 4708 } 4709 4710 struct cam_ed * 4711 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4712 { 4713 struct cam_ed *cur_device, *device; 4714 struct cam_devq *devq; 4715 cam_status status; 4716 4717 mtx_assert(&bus->eb_mtx, MA_OWNED); 4718 /* Make space for us in the device queue on our bus */ 4719 devq = bus->sim->devq; 4720 mtx_lock(&devq->send_mtx); 4721 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4722 mtx_unlock(&devq->send_mtx); 4723 if (status != CAM_REQ_CMP) 4724 return (NULL); 4725 4726 device = (struct cam_ed *)malloc(sizeof(*device), 4727 M_CAMDEV, M_NOWAIT|M_ZERO); 4728 if (device == NULL) 4729 return (NULL); 4730 4731 cam_init_pinfo(&device->devq_entry); 4732 device->target = target; 4733 device->lun_id = lun_id; 4734 device->sim = bus->sim; 4735 if (cam_ccbq_init(&device->ccbq, 4736 bus->sim->max_dev_openings) != 0) { 4737 free(device, M_CAMDEV); 4738 return (NULL); 4739 } 4740 SLIST_INIT(&device->asyncs); 4741 SLIST_INIT(&device->periphs); 4742 device->generation = 0; 4743 device->flags = CAM_DEV_UNCONFIGURED; 4744 device->tag_delay_count = 0; 4745 device->tag_saved_openings = 0; 4746 device->refcount = 1; 4747 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4748 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4749 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4750 /* 4751 * Hold a reference to our parent bus so it 4752 * will not go away before we do. 
4753 */ 4754 target->refcount++; 4755 4756 cur_device = TAILQ_FIRST(&target->ed_entries); 4757 while (cur_device != NULL && cur_device->lun_id < lun_id) 4758 cur_device = TAILQ_NEXT(cur_device, links); 4759 if (cur_device != NULL) 4760 TAILQ_INSERT_BEFORE(cur_device, device, links); 4761 else 4762 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4763 target->generation++; 4764 return (device); 4765 } 4766 4767 void 4768 xpt_acquire_device(struct cam_ed *device) 4769 { 4770 struct cam_eb *bus = device->target->bus; 4771 4772 mtx_lock(&bus->eb_mtx); 4773 device->refcount++; 4774 mtx_unlock(&bus->eb_mtx); 4775 } 4776 4777 void 4778 xpt_release_device(struct cam_ed *device) 4779 { 4780 struct cam_eb *bus = device->target->bus; 4781 struct cam_devq *devq; 4782 4783 mtx_lock(&bus->eb_mtx); 4784 if (--device->refcount > 0) { 4785 mtx_unlock(&bus->eb_mtx); 4786 return; 4787 } 4788 4789 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4790 device->target->generation++; 4791 mtx_unlock(&bus->eb_mtx); 4792 4793 /* Release our slot in the devq */ 4794 devq = bus->sim->devq; 4795 mtx_lock(&devq->send_mtx); 4796 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4797 mtx_unlock(&devq->send_mtx); 4798 4799 KASSERT(SLIST_EMPTY(&device->periphs), 4800 ("destroying device, but periphs list is not empty")); 4801 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4802 ("destroying device while still queued for ccbs")); 4803 4804 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4805 callout_stop(&device->callout); 4806 4807 xpt_release_target(device->target); 4808 4809 cam_ccbq_fini(&device->ccbq); 4810 /* 4811 * Free allocated memory. free(9) does nothing if the 4812 * supplied pointer is NULL, so it is safe to call without 4813 * checking. 
4814 */ 4815 free(device->supported_vpds, M_CAMXPT); 4816 free(device->device_id, M_CAMXPT); 4817 free(device->ext_inq, M_CAMXPT); 4818 free(device->physpath, M_CAMXPT); 4819 free(device->rcap_buf, M_CAMXPT); 4820 free(device->serial_num, M_CAMXPT); 4821 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4822 } 4823 4824 u_int32_t 4825 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4826 { 4827 int result; 4828 struct cam_ed *dev; 4829 4830 dev = path->device; 4831 mtx_lock(&dev->sim->devq->send_mtx); 4832 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4833 mtx_unlock(&dev->sim->devq->send_mtx); 4834 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4835 || (dev->inq_flags & SID_CmdQue) != 0) 4836 dev->tag_saved_openings = newopenings; 4837 return (result); 4838 } 4839 4840 static struct cam_eb * 4841 xpt_find_bus(path_id_t path_id) 4842 { 4843 struct cam_eb *bus; 4844 4845 xpt_lock_buses(); 4846 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4847 bus != NULL; 4848 bus = TAILQ_NEXT(bus, links)) { 4849 if (bus->path_id == path_id) { 4850 bus->refcount++; 4851 break; 4852 } 4853 } 4854 xpt_unlock_buses(); 4855 return (bus); 4856 } 4857 4858 static struct cam_et * 4859 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4860 { 4861 struct cam_et *target; 4862 4863 mtx_assert(&bus->eb_mtx, MA_OWNED); 4864 for (target = TAILQ_FIRST(&bus->et_entries); 4865 target != NULL; 4866 target = TAILQ_NEXT(target, links)) { 4867 if (target->target_id == target_id) { 4868 target->refcount++; 4869 break; 4870 } 4871 } 4872 return (target); 4873 } 4874 4875 static struct cam_ed * 4876 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4877 { 4878 struct cam_ed *device; 4879 4880 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4881 for (device = TAILQ_FIRST(&target->ed_entries); 4882 device != NULL; 4883 device = TAILQ_NEXT(device, links)) { 4884 if (device->lun_id == lun_id) { 4885 device->refcount++; 4886 break; 4887 } 4888 } 4889 return (device); 4890 } 4891 4892 void 4893 xpt_start_tags(struct cam_path *path) 4894 { 4895 struct ccb_relsim crs; 4896 struct cam_ed *device; 4897 struct cam_sim *sim; 4898 int newopenings; 4899 4900 device = path->device; 4901 sim = path->bus->sim; 4902 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4903 xpt_freeze_devq(path, /*count*/1); 4904 device->inq_flags |= SID_CmdQue; 4905 if (device->tag_saved_openings != 0) 4906 newopenings = device->tag_saved_openings; 4907 else 4908 newopenings = min(device->maxtags, 4909 sim->max_tagged_dev_openings); 4910 xpt_dev_ccbq_resize(path, newopenings); 4911 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4912 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4913 crs.ccb_h.func_code = XPT_REL_SIMQ; 4914 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4915 crs.openings 4916 = crs.release_timeout 4917 = crs.qfrozen_cnt 4918 = 0; 4919 xpt_action((union ccb *)&crs); 4920 } 4921 4922 void 4923 xpt_stop_tags(struct cam_path *path) 4924 { 4925 struct ccb_relsim crs; 4926 struct cam_ed *device; 4927 struct cam_sim *sim; 4928 4929 device = path->device; 4930 sim = path->bus->sim; 4931 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4932 device->tag_delay_count = 0; 4933 xpt_freeze_devq(path, /*count*/1); 4934 device->inq_flags &= ~SID_CmdQue; 4935 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4936 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4937 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4938 crs.ccb_h.func_code = XPT_REL_SIMQ; 4939 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4940 crs.openings 4941 = 
crs.release_timeout 4942 = crs.qfrozen_cnt 4943 = 0; 4944 xpt_action((union ccb *)&crs); 4945 } 4946 4947 static void 4948 xpt_boot_delay(void *arg) 4949 { 4950 4951 xpt_release_boot(); 4952 } 4953 4954 static void 4955 xpt_config(void *arg) 4956 { 4957 /* 4958 * Now that interrupts are enabled, go find our devices 4959 */ 4960 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 4961 printf("xpt_config: failed to create taskqueue thread.\n"); 4962 4963 /* Setup debugging path */ 4964 if (cam_dflags != CAM_DEBUG_NONE) { 4965 if (xpt_create_path(&cam_dpath, NULL, 4966 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 4967 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 4968 printf("xpt_config: xpt_create_path() failed for debug" 4969 " target %d:%d:%d, debugging disabled\n", 4970 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 4971 cam_dflags = CAM_DEBUG_NONE; 4972 } 4973 } else 4974 cam_dpath = NULL; 4975 4976 periphdriver_init(1); 4977 xpt_hold_boot(); 4978 callout_init(&xsoftc.boot_callout, 1); 4979 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 4980 xpt_boot_delay, NULL, 0); 4981 /* Fire up rescan thread. */ 4982 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 4983 "cam", "scanner")) { 4984 printf("xpt_config: failed to create rescan thread.\n"); 4985 } 4986 } 4987 4988 void 4989 xpt_hold_boot(void) 4990 { 4991 xpt_lock_buses(); 4992 xsoftc.buses_to_config++; 4993 xpt_unlock_buses(); 4994 } 4995 4996 void 4997 xpt_release_boot(void) 4998 { 4999 xpt_lock_buses(); 5000 xsoftc.buses_to_config--; 5001 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 5002 struct xpt_task *task; 5003 5004 xsoftc.buses_config_done = 1; 5005 xpt_unlock_buses(); 5006 /* Call manually because we don't have any busses */ 5007 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5008 if (task != NULL) { 5009 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5010 taskqueue_enqueue(taskqueue_thread, &task->task); 5011 } 5012 } else 5013 xpt_unlock_buses(); 5014 } 5015 5016 /* 5017 * If the given device only has one peripheral attached to it, and if that 5018 * peripheral is the passthrough driver, announce it. This insures that the 5019 * user sees some sort of announcement for every peripheral in their system. 5020 */ 5021 static int 5022 xptpassannouncefunc(struct cam_ed *device, void *arg) 5023 { 5024 struct cam_periph *periph; 5025 int i; 5026 5027 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5028 periph = SLIST_NEXT(periph, periph_links), i++); 5029 5030 periph = SLIST_FIRST(&device->periphs); 5031 if ((i == 1) 5032 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5033 xpt_announce_periph(periph, NULL); 5034 5035 return(1); 5036 } 5037 5038 static void 5039 xpt_finishconfig_task(void *context, int pending) 5040 { 5041 5042 periphdriver_init(2); 5043 /* 5044 * Check for devices with no "standard" peripheral driver 5045 * attached. For any devices like that, announce the 5046 * passthrough driver so the user will see something. 5047 */ 5048 if (!bootverbose) 5049 xpt_for_all_devices(xptpassannouncefunc, NULL); 5050 5051 /* Release our hook so that the boot can continue. 
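 * This is the intr_config_hook established earlier in this file (in
 * xpt_init()); while it remains outstanding the kernel defers mounting
 * the root file system, so it has to be dropped once the initial bus
 * scans are complete.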
*/ 5052 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5053 free(xsoftc.xpt_config_hook, M_CAMXPT); 5054 xsoftc.xpt_config_hook = NULL; 5055 5056 free(context, M_CAMXPT); 5057 } 5058 5059 cam_status 5060 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5061 struct cam_path *path) 5062 { 5063 struct ccb_setasync csa; 5064 cam_status status; 5065 int xptpath = 0; 5066 5067 if (path == NULL) { 5068 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5069 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5070 if (status != CAM_REQ_CMP) 5071 return (status); 5072 xpt_path_lock(path); 5073 xptpath = 1; 5074 } 5075 5076 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5077 csa.ccb_h.func_code = XPT_SASYNC_CB; 5078 csa.event_enable = event; 5079 csa.callback = cbfunc; 5080 csa.callback_arg = cbarg; 5081 xpt_action((union ccb *)&csa); 5082 status = csa.ccb_h.status; 5083 5084 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5085 ("xpt_register_async: func %p\n", cbfunc)); 5086 5087 if (xptpath) { 5088 xpt_path_unlock(path); 5089 xpt_free_path(path); 5090 } 5091 5092 if ((status == CAM_REQ_CMP) && 5093 (csa.event_enable & AC_FOUND_DEVICE)) { 5094 /* 5095 * Get this peripheral up to date with all 5096 * the currently existing devices. 5097 */ 5098 xpt_for_all_devices(xptsetasyncfunc, &csa); 5099 } 5100 if ((status == CAM_REQ_CMP) && 5101 (csa.event_enable & AC_PATH_REGISTERED)) { 5102 /* 5103 * Get this peripheral up to date with all 5104 * the currently existing busses. 5105 */ 5106 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5107 } 5108 5109 return (status); 5110 } 5111 5112 static void 5113 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5114 { 5115 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5116 5117 switch (work_ccb->ccb_h.func_code) { 5118 /* Common cases first */ 5119 case XPT_PATH_INQ: /* Path routing inquiry */ 5120 { 5121 struct ccb_pathinq *cpi; 5122 5123 cpi = &work_ccb->cpi; 5124 cpi->version_num = 1; /* XXX??? */ 5125 cpi->hba_inquiry = 0; 5126 cpi->target_sprt = 0; 5127 cpi->hba_misc = 0; 5128 cpi->hba_eng_cnt = 0; 5129 cpi->max_target = 0; 5130 cpi->max_lun = 0; 5131 cpi->initiator_id = 0; 5132 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5133 strncpy(cpi->hba_vid, "", HBA_IDLEN); 5134 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5135 cpi->unit_number = sim->unit_number; 5136 cpi->bus_id = sim->bus_id; 5137 cpi->base_transfer_speed = 0; 5138 cpi->protocol = PROTO_UNSPECIFIED; 5139 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5140 cpi->transport = XPORT_UNSPECIFIED; 5141 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5142 cpi->ccb_h.status = CAM_REQ_CMP; 5143 xpt_done(work_ccb); 5144 break; 5145 } 5146 default: 5147 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5148 xpt_done(work_ccb); 5149 break; 5150 } 5151 } 5152 5153 /* 5154 * The xpt as a "controller" has no interrupt sources, so polling 5155 * is a no-op. 
5156 */ 5157 static void 5158 xptpoll(struct cam_sim *sim) 5159 { 5160 } 5161 5162 void 5163 xpt_lock_buses(void) 5164 { 5165 mtx_lock(&xsoftc.xpt_topo_lock); 5166 } 5167 5168 void 5169 xpt_unlock_buses(void) 5170 { 5171 mtx_unlock(&xsoftc.xpt_topo_lock); 5172 } 5173 5174 struct mtx * 5175 xpt_path_mtx(struct cam_path *path) 5176 { 5177 5178 return (&path->device->device_mtx); 5179 } 5180 5181 static void 5182 xpt_done_process(struct ccb_hdr *ccb_h) 5183 { 5184 struct cam_sim *sim; 5185 struct cam_devq *devq; 5186 struct mtx *mtx = NULL; 5187 5188 if (ccb_h->flags & CAM_HIGH_POWER) { 5189 struct highpowerlist *hphead; 5190 struct cam_ed *device; 5191 5192 mtx_lock(&xsoftc.xpt_highpower_lock); 5193 hphead = &xsoftc.highpowerq; 5194 5195 device = STAILQ_FIRST(hphead); 5196 5197 /* 5198 * Increment the count since this command is done. 5199 */ 5200 xsoftc.num_highpower++; 5201 5202 /* 5203 * Any high powered commands queued up? 5204 */ 5205 if (device != NULL) { 5206 5207 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5208 mtx_unlock(&xsoftc.xpt_highpower_lock); 5209 5210 mtx_lock(&device->sim->devq->send_mtx); 5211 xpt_release_devq_device(device, 5212 /*count*/1, /*runqueue*/TRUE); 5213 mtx_unlock(&device->sim->devq->send_mtx); 5214 } else 5215 mtx_unlock(&xsoftc.xpt_highpower_lock); 5216 } 5217 5218 sim = ccb_h->path->bus->sim; 5219 5220 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5221 xpt_release_simq(sim, /*run_queue*/FALSE); 5222 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5223 } 5224 5225 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5226 && (ccb_h->status & CAM_DEV_QFRZN)) { 5227 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5228 ccb_h->status &= ~CAM_DEV_QFRZN; 5229 } 5230 5231 devq = sim->devq; 5232 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5233 struct cam_ed *dev = ccb_h->path->device; 5234 5235 mtx_lock(&devq->send_mtx); 5236 devq->send_active--; 5237 devq->send_openings++; 5238 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5239 5240 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5241 && (dev->ccbq.dev_active == 0))) { 5242 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5243 xpt_release_devq_device(dev, /*count*/1, 5244 /*run_queue*/FALSE); 5245 } 5246 5247 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5248 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5249 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5250 xpt_release_devq_device(dev, /*count*/1, 5251 /*run_queue*/FALSE); 5252 } 5253 5254 if (!device_is_queued(dev)) 5255 (void)xpt_schedule_devq(devq, dev); 5256 xpt_run_devq(devq); 5257 mtx_unlock(&devq->send_mtx); 5258 5259 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5260 mtx = xpt_path_mtx(ccb_h->path); 5261 mtx_lock(mtx); 5262 5263 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5264 && (--dev->tag_delay_count == 0)) 5265 xpt_start_tags(ccb_h->path); 5266 } 5267 } 5268 5269 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5270 if (mtx == NULL) { 5271 mtx = xpt_path_mtx(ccb_h->path); 5272 mtx_lock(mtx); 5273 } 5274 } else { 5275 if (mtx != NULL) { 5276 mtx_unlock(mtx); 5277 mtx = NULL; 5278 } 5279 } 5280 5281 /* Call the peripheral driver's callback */ 5282 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5283 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5284 if (mtx != NULL) 5285 mtx_unlock(mtx); 5286 } 5287 5288 void 5289 xpt_done_td(void *arg) 5290 { 5291 struct cam_doneq *queue = arg; 5292 struct ccb_hdr *ccb_h; 5293 STAILQ_HEAD(, ccb_hdr) doneq; 5294 5295 STAILQ_INIT(&doneq); 5296 mtx_lock(&queue->cam_doneq_mtx); 5297 while (1) { 5298 while 
(STAILQ_EMPTY(&queue->cam_doneq)) { 5299 queue->cam_doneq_sleep = 1; 5300 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5301 PRIBIO, "-", 0); 5302 queue->cam_doneq_sleep = 0; 5303 } 5304 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5305 mtx_unlock(&queue->cam_doneq_mtx); 5306 5307 THREAD_NO_SLEEPING(); 5308 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5309 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5310 xpt_done_process(ccb_h); 5311 } 5312 THREAD_SLEEPING_OK(); 5313 5314 mtx_lock(&queue->cam_doneq_mtx); 5315 } 5316 } 5317 5318 static void 5319 camisr_runqueue(void) 5320 { 5321 struct ccb_hdr *ccb_h; 5322 struct cam_doneq *queue; 5323 int i; 5324 5325 /* Process global queues. */ 5326 for (i = 0; i < cam_num_doneqs; i++) { 5327 queue = &cam_doneqs[i]; 5328 mtx_lock(&queue->cam_doneq_mtx); 5329 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5330 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5331 mtx_unlock(&queue->cam_doneq_mtx); 5332 xpt_done_process(ccb_h); 5333 mtx_lock(&queue->cam_doneq_mtx); 5334 } 5335 mtx_unlock(&queue->cam_doneq_mtx); 5336 } 5337 } 5338 5339 struct kv 5340 { 5341 uint32_t v; 5342 const char *name; 5343 }; 5344 5345 static struct kv map[] = { 5346 { XPT_NOOP, "XPT_NOOP" }, 5347 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5348 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5349 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5350 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5351 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5352 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5353 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5354 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5355 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5356 { XPT_DEBUG, "XPT_DEBUG" }, 5357 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5358 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5359 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5360 { XPT_ASYNC, "XPT_ASYNC" }, 5361 { XPT_ABORT, "XPT_ABORT" }, 5362 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5363 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5364 { XPT_TERM_IO, "XPT_TERM_IO" }, 5365 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5366 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5367 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5368 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5369 { XPT_ATA_IO, "XPT_ATA_IO" }, 5370 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5371 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5372 { XPT_NVME_IO, "XPT_NVME_IO" }, 5373 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5374 { XPT_SMP_IO, "XPT_SMP_IO" }, 5375 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5376 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5377 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5378 { XPT_EN_LUN, "XPT_EN_LUN" }, 5379 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5380 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5381 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5382 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5383 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5384 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5385 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5386 { 0, 0 } 5387 }; 5388 5389 static const char * 5390 xpt_action_name(uint32_t action) 5391 { 5392 static char buffer[32]; /* Only for unknown messages -- racy */ 5393 struct kv *walker = map; 5394 5395 while (walker->name != NULL) { 5396 if (walker->v == action) 5397 return (walker->name); 5398 walker++; 5399 } 5400 5401 snprintf(buffer, sizeof(buffer), "%#x", action); 5402 return (buffer); 5403 } 5404