/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER	4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/* Registered busses */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
	   &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
	   &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags =
	CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int
			xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static const char *	xpt_action_name(uint32_t action);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.
			 * The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass"), and unit number are passed back in the ccb.
	 * The current device generation number, and the index into the
	 * device peripheral driver list, and the status are also passed
	 * back.  Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change since we look at
	 * the whole thing in one pass, and we do it with lock protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {

			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = 1;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strcpy(ccb->cgdl.periph_name,
					       periph->periph_name);
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found == 1) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
	struct xpt_proto **pp;

	SET_FOREACH(pp, cam_xpt_proto_set) {
		if ((*pp)->proto == proto)
			return *pp;
	}

	return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct cam_path	 path;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * Since lock can be dropped inside and path freed
			 * by completion callback even before return here,
			 * take our own path copy for reference.
			 */
			xpt_copy_path(&path, ccb->ccb_h.path);
			xpt_path_lock(&path);
			xpt_action(ccb);
			xpt_path_unlock(&path);
			xpt_release_path(&path);

			xpt_lock_buses();
		}
	}
}

/*
 * Queue an asynchronous rescan of the given path.  The request is picked up
 * and executed later by xpt_scanner_thread().
 */
void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
		xpt_action_name(ccb->ccb_h.func_code)));

	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths.
	 */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe busses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
	 */
	xsoftc.xpt_config_hook =
	    (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook),
					      M_CAMXPT, M_NOWAIT | M_ZERO);
	if (xsoftc.xpt_config_hook == NULL) {
		printf("xpt_init: Cannot malloc config hook "
		       "- failing attach\n");
		return (ENOMEM);
	}
	xsoftc.xpt_config_hook->ich_func = xpt_config;
	if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
		free (xsoftc.xpt_config_hook, M_CAMXPT);
		printf("xpt_init: config_intrhook_establish failed "
		       "- failing attach\n");
	}

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;

	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->announce(path->device);
	else
		printf("%s%d: Unknown protocol device %d\n",
		    periph->periph_name, periph->unit_number,
		    path->device->protocol);
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen - print only the first 60 chars */
		printf("%s%d: Serial Number %.60s\n", periph->periph_name,
		       periph->unit_number, path->device->serial_num);
	}
	/* Announce transport details. */
	path->bus->xport->ops->announce(periph);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		printf("%s%d: Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}
	/* Announce caller's details if they've been passed in.
	 */
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->denounce(path->device);
	else
		printf("%s%d: Unknown protocol device %d\n",
		    periph->periph_name, periph->unit_number,
		    path->device->protocol);
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}

/*
 * Look up a device attribute (GEOM::ident, GEOM::physpath, GEOM::lunid or
 * GEOM::lunname) and copy it into the caller's buffer using an
 * XPT_DEV_ADVINFO CCB.
 */
int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
	    strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.
		 * We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		/*
		 * If they want to match any device node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == DEV_MATCH_ANY)
			goto copy_dev_node;

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == DEV_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

copy_dev_node:
		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		/*
		 * If they want to match on anything, then we will do so.
		 */
		if (cur_pattern->flags == PERIPH_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * We've already set the return action to stop,
			 * since there are no nodes below peripherals in
			 * the tree.
			 */
			return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == PERIPH_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_et *target;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strncpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name, DEV_IDLEN);
	}

	/*
	 * If the user is only interested in busses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL)) {
		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
		    bus->generation)) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return (0);
		}
		target = (struct cam_et *)cdm->pos.cookie.target;
		target->refcount++;
	} else
		target = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_eb *bus;
	struct cam_ed *device;

	cdm = (struct ccb_dev_match *)arg;
	bus = target->bus;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL)) {
		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
		    target->generation) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		device = (struct cam_ed *)cdm->pos.cookie.device;
		device->refcount++;
	} else
		device = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct cam_eb *bus;
	struct cam_periph *periph;
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;
	bus = device->target->bus;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	xpt_lock_buses();
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL)) {
		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
		    device->generation) {
			mtx_unlock(&bus->eb_mtx);
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		periph = (struct cam_periph *)cdm->pos.cookie.periph;
		periph->refcount++;
	} else
		periph = NULL;
	mtx_unlock(&bus->eb_mtx);
	xpt_unlock_buses();

	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		strncpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, DEV_IDLEN);
	}

	return(1);
}

static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	struct cam_eb *bus;
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	xpt_lock_buses();
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL)) {
		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
		    xsoftc.bus_generation) {
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		bus = (struct cam_eb *)cdm->pos.cookie.bus;
		bus->refcount++;
	} else
		bus = NULL;
	xpt_unlock_buses();

	ret = xptbustraverse(bus, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
1899 */ 1900 if (ret == 1) 1901 cdm->status = CAM_DEV_MATCH_LAST; 1902 1903 return(ret); 1904 } 1905 1906 static int 1907 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1908 { 1909 struct cam_periph *periph; 1910 struct ccb_dev_match *cdm; 1911 1912 cdm = (struct ccb_dev_match *)arg; 1913 1914 xpt_lock_buses(); 1915 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1916 && (cdm->pos.cookie.pdrv == pdrv) 1917 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1918 && (cdm->pos.cookie.periph != NULL)) { 1919 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1920 (*pdrv)->generation) { 1921 xpt_unlock_buses(); 1922 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1923 return(0); 1924 } 1925 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1926 periph->refcount++; 1927 } else 1928 periph = NULL; 1929 xpt_unlock_buses(); 1930 1931 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1932 } 1933 1934 static int 1935 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1936 { 1937 struct ccb_dev_match *cdm; 1938 dev_match_ret retval; 1939 1940 cdm = (struct ccb_dev_match *)arg; 1941 1942 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1943 1944 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1945 cdm->status = CAM_DEV_MATCH_ERROR; 1946 return(0); 1947 } 1948 1949 /* 1950 * If the copy flag is set, copy this peripheral out. 1951 */ 1952 if (retval & DM_RET_COPY) { 1953 int spaceleft, j; 1954 1955 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1956 sizeof(struct dev_match_result)); 1957 1958 /* 1959 * If we don't have enough space to put in another 1960 * match result, save our position and tell the 1961 * user there are more devices to check. 1962 */ 1963 if (spaceleft < sizeof(struct dev_match_result)) { 1964 struct periph_driver **pdrv; 1965 1966 pdrv = NULL; 1967 bzero(&cdm->pos, sizeof(cdm->pos)); 1968 cdm->pos.position_type = 1969 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 1970 CAM_DEV_POS_PERIPH; 1971 1972 /* 1973 * This may look a bit non-sensical, but it is 1974 * actually quite logical. There are very few 1975 * peripheral drivers, and bloating every peripheral 1976 * structure with a pointer back to its parent 1977 * peripheral driver linker set entry would cost 1978 * more in the long run than doing this quick lookup. 1979 */ 1980 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 1981 if (strcmp((*pdrv)->driver_name, 1982 periph->periph_name) == 0) 1983 break; 1984 } 1985 1986 if (*pdrv == NULL) { 1987 cdm->status = CAM_DEV_MATCH_ERROR; 1988 return(0); 1989 } 1990 1991 cdm->pos.cookie.pdrv = pdrv; 1992 /* 1993 * The periph generation slot does double duty, as 1994 * does the periph pointer slot. They are used for 1995 * both edt and pdrv lookups and positioning. 1996 */ 1997 cdm->pos.cookie.periph = periph; 1998 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1999 (*pdrv)->generation; 2000 cdm->status = CAM_DEV_MATCH_MORE; 2001 return(0); 2002 } 2003 2004 j = cdm->num_matches; 2005 cdm->num_matches++; 2006 cdm->matches[j].type = DEV_MATCH_PERIPH; 2007 cdm->matches[j].result.periph_result.path_id = 2008 periph->path->bus->path_id; 2009 2010 /* 2011 * The transport layer peripheral doesn't have a target or 2012 * lun. 
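 * Report the wildcard IDs in that case so the result entry is still
 * well formed.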
2013 */
2014 if (periph->path->target)
2015 cdm->matches[j].result.periph_result.target_id =
2016 periph->path->target->target_id;
2017 else
2018 cdm->matches[j].result.periph_result.target_id =
2019 CAM_TARGET_WILDCARD;
2020 
2021 if (periph->path->device)
2022 cdm->matches[j].result.periph_result.target_lun =
2023 periph->path->device->lun_id;
2024 else
2025 cdm->matches[j].result.periph_result.target_lun =
2026 CAM_LUN_WILDCARD;
2027 
2028 cdm->matches[j].result.periph_result.unit_number =
2029 periph->unit_number;
2030 strncpy(cdm->matches[j].result.periph_result.periph_name,
2031 periph->periph_name, DEV_IDLEN);
2032 }
2033 
2034 return(1);
2035 }
2036 
2037 static int
2038 xptperiphlistmatch(struct ccb_dev_match *cdm)
2039 {
2040 int ret;
2041 
2042 cdm->num_matches = 0;
2043 
2044 /*
2045 * At this point in the edt traversal function, we check the bus
2046 * list generation to make sure that no busses have been added or
2047 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2048 * For the peripheral driver list traversal function, however, we
2049 * don't have to worry about new peripheral driver types coming or
2050 * going; they're in a linker set, and therefore can't change
2051 * without a recompile.
2052 */
2053 
2054 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2055 && (cdm->pos.cookie.pdrv != NULL))
2056 ret = xptpdrvtraverse(
2057 (struct periph_driver **)cdm->pos.cookie.pdrv,
2058 xptplistpdrvfunc, cdm);
2059 else
2060 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2061 
2062 /*
2063 * If we get back 0, that means that we had to stop before fully
2064 * traversing the peripheral driver tree. It also means that one of
2065 * the subroutines has set the status field to the proper value. If
2066 * we get back 1, we've fully traversed the peripheral driver list
2067 * and copied out any matching entries.
2068 */ 2069 if (ret == 1) 2070 cdm->status = CAM_DEV_MATCH_LAST; 2071 2072 return(ret); 2073 } 2074 2075 static int 2076 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2077 { 2078 struct cam_eb *bus, *next_bus; 2079 int retval; 2080 2081 retval = 1; 2082 if (start_bus) 2083 bus = start_bus; 2084 else { 2085 xpt_lock_buses(); 2086 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2087 if (bus == NULL) { 2088 xpt_unlock_buses(); 2089 return (retval); 2090 } 2091 bus->refcount++; 2092 xpt_unlock_buses(); 2093 } 2094 for (; bus != NULL; bus = next_bus) { 2095 retval = tr_func(bus, arg); 2096 if (retval == 0) { 2097 xpt_release_bus(bus); 2098 break; 2099 } 2100 xpt_lock_buses(); 2101 next_bus = TAILQ_NEXT(bus, links); 2102 if (next_bus) 2103 next_bus->refcount++; 2104 xpt_unlock_buses(); 2105 xpt_release_bus(bus); 2106 } 2107 return(retval); 2108 } 2109 2110 static int 2111 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2112 xpt_targetfunc_t *tr_func, void *arg) 2113 { 2114 struct cam_et *target, *next_target; 2115 int retval; 2116 2117 retval = 1; 2118 if (start_target) 2119 target = start_target; 2120 else { 2121 mtx_lock(&bus->eb_mtx); 2122 target = TAILQ_FIRST(&bus->et_entries); 2123 if (target == NULL) { 2124 mtx_unlock(&bus->eb_mtx); 2125 return (retval); 2126 } 2127 target->refcount++; 2128 mtx_unlock(&bus->eb_mtx); 2129 } 2130 for (; target != NULL; target = next_target) { 2131 retval = tr_func(target, arg); 2132 if (retval == 0) { 2133 xpt_release_target(target); 2134 break; 2135 } 2136 mtx_lock(&bus->eb_mtx); 2137 next_target = TAILQ_NEXT(target, links); 2138 if (next_target) 2139 next_target->refcount++; 2140 mtx_unlock(&bus->eb_mtx); 2141 xpt_release_target(target); 2142 } 2143 return(retval); 2144 } 2145 2146 static int 2147 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2148 xpt_devicefunc_t *tr_func, void *arg) 2149 { 2150 struct cam_eb *bus; 2151 struct cam_ed *device, *next_device; 2152 int retval; 2153 2154 retval = 1; 2155 bus = target->bus; 2156 if (start_device) 2157 device = start_device; 2158 else { 2159 mtx_lock(&bus->eb_mtx); 2160 device = TAILQ_FIRST(&target->ed_entries); 2161 if (device == NULL) { 2162 mtx_unlock(&bus->eb_mtx); 2163 return (retval); 2164 } 2165 device->refcount++; 2166 mtx_unlock(&bus->eb_mtx); 2167 } 2168 for (; device != NULL; device = next_device) { 2169 mtx_lock(&device->device_mtx); 2170 retval = tr_func(device, arg); 2171 mtx_unlock(&device->device_mtx); 2172 if (retval == 0) { 2173 xpt_release_device(device); 2174 break; 2175 } 2176 mtx_lock(&bus->eb_mtx); 2177 next_device = TAILQ_NEXT(device, links); 2178 if (next_device) 2179 next_device->refcount++; 2180 mtx_unlock(&bus->eb_mtx); 2181 xpt_release_device(device); 2182 } 2183 return(retval); 2184 } 2185 2186 static int 2187 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2188 xpt_periphfunc_t *tr_func, void *arg) 2189 { 2190 struct cam_eb *bus; 2191 struct cam_periph *periph, *next_periph; 2192 int retval; 2193 2194 retval = 1; 2195 2196 bus = device->target->bus; 2197 if (start_periph) 2198 periph = start_periph; 2199 else { 2200 xpt_lock_buses(); 2201 mtx_lock(&bus->eb_mtx); 2202 periph = SLIST_FIRST(&device->periphs); 2203 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2204 periph = SLIST_NEXT(periph, periph_links); 2205 if (periph == NULL) { 2206 mtx_unlock(&bus->eb_mtx); 2207 xpt_unlock_buses(); 2208 return (retval); 2209 } 2210 periph->refcount++; 2211 mtx_unlock(&bus->eb_mtx); 2212 
xpt_unlock_buses(); 2213 } 2214 for (; periph != NULL; periph = next_periph) { 2215 retval = tr_func(periph, arg); 2216 if (retval == 0) { 2217 cam_periph_release_locked(periph); 2218 break; 2219 } 2220 xpt_lock_buses(); 2221 mtx_lock(&bus->eb_mtx); 2222 next_periph = SLIST_NEXT(periph, periph_links); 2223 while (next_periph != NULL && 2224 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2225 next_periph = SLIST_NEXT(next_periph, periph_links); 2226 if (next_periph) 2227 next_periph->refcount++; 2228 mtx_unlock(&bus->eb_mtx); 2229 xpt_unlock_buses(); 2230 cam_periph_release_locked(periph); 2231 } 2232 return(retval); 2233 } 2234 2235 static int 2236 xptpdrvtraverse(struct periph_driver **start_pdrv, 2237 xpt_pdrvfunc_t *tr_func, void *arg) 2238 { 2239 struct periph_driver **pdrv; 2240 int retval; 2241 2242 retval = 1; 2243 2244 /* 2245 * We don't traverse the peripheral driver list like we do the 2246 * other lists, because it is a linker set, and therefore cannot be 2247 * changed during runtime. If the peripheral driver list is ever 2248 * re-done to be something other than a linker set (i.e. it can 2249 * change while the system is running), the list traversal should 2250 * be modified to work like the other traversal functions. 2251 */ 2252 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2253 *pdrv != NULL; pdrv++) { 2254 retval = tr_func(pdrv, arg); 2255 2256 if (retval == 0) 2257 return(retval); 2258 } 2259 2260 return(retval); 2261 } 2262 2263 static int 2264 xptpdperiphtraverse(struct periph_driver **pdrv, 2265 struct cam_periph *start_periph, 2266 xpt_periphfunc_t *tr_func, void *arg) 2267 { 2268 struct cam_periph *periph, *next_periph; 2269 int retval; 2270 2271 retval = 1; 2272 2273 if (start_periph) 2274 periph = start_periph; 2275 else { 2276 xpt_lock_buses(); 2277 periph = TAILQ_FIRST(&(*pdrv)->units); 2278 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2279 periph = TAILQ_NEXT(periph, unit_links); 2280 if (periph == NULL) { 2281 xpt_unlock_buses(); 2282 return (retval); 2283 } 2284 periph->refcount++; 2285 xpt_unlock_buses(); 2286 } 2287 for (; periph != NULL; periph = next_periph) { 2288 cam_periph_lock(periph); 2289 retval = tr_func(periph, arg); 2290 cam_periph_unlock(periph); 2291 if (retval == 0) { 2292 cam_periph_release(periph); 2293 break; 2294 } 2295 xpt_lock_buses(); 2296 next_periph = TAILQ_NEXT(periph, unit_links); 2297 while (next_periph != NULL && 2298 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2299 next_periph = TAILQ_NEXT(next_periph, unit_links); 2300 if (next_periph) 2301 next_periph->refcount++; 2302 xpt_unlock_buses(); 2303 cam_periph_release(periph); 2304 } 2305 return(retval); 2306 } 2307 2308 static int 2309 xptdefbusfunc(struct cam_eb *bus, void *arg) 2310 { 2311 struct xpt_traverse_config *tr_config; 2312 2313 tr_config = (struct xpt_traverse_config *)arg; 2314 2315 if (tr_config->depth == XPT_DEPTH_BUS) { 2316 xpt_busfunc_t *tr_func; 2317 2318 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2319 2320 return(tr_func(bus, tr_config->tr_arg)); 2321 } else 2322 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2323 } 2324 2325 static int 2326 xptdeftargetfunc(struct cam_et *target, void *arg) 2327 { 2328 struct xpt_traverse_config *tr_config; 2329 2330 tr_config = (struct xpt_traverse_config *)arg; 2331 2332 if (tr_config->depth == XPT_DEPTH_TARGET) { 2333 xpt_targetfunc_t *tr_func; 2334 2335 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2336 2337 return(tr_func(target, tr_config->tr_arg)); 2338 } else 2339 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2340 } 2341 2342 static int 2343 xptdefdevicefunc(struct cam_ed *device, void *arg) 2344 { 2345 struct xpt_traverse_config *tr_config; 2346 2347 tr_config = (struct xpt_traverse_config *)arg; 2348 2349 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2350 xpt_devicefunc_t *tr_func; 2351 2352 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2353 2354 return(tr_func(device, tr_config->tr_arg)); 2355 } else 2356 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2357 } 2358 2359 static int 2360 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2361 { 2362 struct xpt_traverse_config *tr_config; 2363 xpt_periphfunc_t *tr_func; 2364 2365 tr_config = (struct xpt_traverse_config *)arg; 2366 2367 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2368 2369 /* 2370 * Unlike the other default functions, we don't check for depth 2371 * here. The peripheral driver level is the last level in the EDT, 2372 * so if we're here, we should execute the function in question. 2373 */ 2374 return(tr_func(periph, tr_config->tr_arg)); 2375 } 2376 2377 /* 2378 * Execute the given function for every bus in the EDT. 2379 */ 2380 static int 2381 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2382 { 2383 struct xpt_traverse_config tr_config; 2384 2385 tr_config.depth = XPT_DEPTH_BUS; 2386 tr_config.tr_func = tr_func; 2387 tr_config.tr_arg = arg; 2388 2389 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2390 } 2391 2392 /* 2393 * Execute the given function for every device in the EDT. 2394 */ 2395 static int 2396 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2397 { 2398 struct xpt_traverse_config tr_config; 2399 2400 tr_config.depth = XPT_DEPTH_DEVICE; 2401 tr_config.tr_func = tr_func; 2402 tr_config.tr_arg = arg; 2403 2404 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2405 } 2406 2407 static int 2408 xptsetasyncfunc(struct cam_ed *device, void *arg) 2409 { 2410 struct cam_path path; 2411 struct ccb_getdev cgd; 2412 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2413 2414 /* 2415 * Don't report unconfigured devices (Wildcard devs, 2416 * devices only for target mode, device instances 2417 * that have been invalidated but are waiting for 2418 * their last reference count to be released). 
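 * Returning 1 simply moves the traversal on to the next device.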
2419 */ 2420 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2421 return (1); 2422 2423 xpt_compile_path(&path, 2424 NULL, 2425 device->target->bus->path_id, 2426 device->target->target_id, 2427 device->lun_id); 2428 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2429 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2430 xpt_action((union ccb *)&cgd); 2431 csa->callback(csa->callback_arg, 2432 AC_FOUND_DEVICE, 2433 &path, &cgd); 2434 xpt_release_path(&path); 2435 2436 return(1); 2437 } 2438 2439 static int 2440 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2441 { 2442 struct cam_path path; 2443 struct ccb_pathinq cpi; 2444 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2445 2446 xpt_compile_path(&path, /*periph*/NULL, 2447 bus->path_id, 2448 CAM_TARGET_WILDCARD, 2449 CAM_LUN_WILDCARD); 2450 xpt_path_lock(&path); 2451 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2452 cpi.ccb_h.func_code = XPT_PATH_INQ; 2453 xpt_action((union ccb *)&cpi); 2454 csa->callback(csa->callback_arg, 2455 AC_PATH_REGISTERED, 2456 &path, &cpi); 2457 xpt_path_unlock(&path); 2458 xpt_release_path(&path); 2459 2460 return(1); 2461 } 2462 2463 void 2464 xpt_action(union ccb *start_ccb) 2465 { 2466 2467 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2468 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2469 xpt_action_name(start_ccb->ccb_h.func_code))); 2470 2471 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2472 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2473 } 2474 2475 void 2476 xpt_action_default(union ccb *start_ccb) 2477 { 2478 struct cam_path *path; 2479 struct cam_sim *sim; 2480 int lock; 2481 2482 path = start_ccb->ccb_h.path; 2483 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2484 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2485 xpt_action_name(start_ccb->ccb_h.func_code))); 2486 2487 switch (start_ccb->ccb_h.func_code) { 2488 case XPT_SCSI_IO: 2489 { 2490 struct cam_ed *device; 2491 2492 /* 2493 * For the sake of compatibility with SCSI-1 2494 * devices that may not understand the identify 2495 * message, we include lun information in the 2496 * second byte of all commands. SCSI-1 specifies 2497 * that luns are a 3 bit value and reserves only 3 2498 * bits for lun information in the CDB. Later 2499 * revisions of the SCSI spec allow for more than 8 2500 * luns, but have deprecated lun information in the 2501 * CDB. So, if the lun won't fit, we must omit. 2502 * 2503 * Also be aware that during initial probing for devices, 2504 * the inquiry information is unknown but initialized to 0. 2505 * This means that this code will be exercised while probing 2506 * devices with an ANSI revision greater than 2. 
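 * (The lun is or'd into bits 5-7 of the second CDB byte just below.)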
2507 */
2508 device = path->device;
2509 if (device->protocol_version <= SCSI_REV_2
2510 && start_ccb->ccb_h.target_lun < 8
2511 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2512 
2513 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2514 start_ccb->ccb_h.target_lun << 5;
2515 }
2516 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2517 }
2518 /* FALLTHROUGH */
2519 case XPT_TARGET_IO:
2520 case XPT_CONT_TARGET_IO:
2521 start_ccb->csio.sense_resid = 0;
2522 start_ccb->csio.resid = 0;
2523 /* FALLTHROUGH */
2524 case XPT_ATA_IO:
2525 if (start_ccb->ccb_h.func_code == XPT_ATA_IO)
2526 start_ccb->ataio.resid = 0;
2527 /* FALLTHROUGH */
2528 case XPT_NVME_IO:
2529 if (start_ccb->ccb_h.func_code == XPT_NVME_IO)
2530 start_ccb->nvmeio.resid = 0;
2531 /* FALLTHROUGH */
2532 case XPT_RESET_DEV:
2533 case XPT_ENG_EXEC:
2534 case XPT_SMP_IO:
2535 {
2536 struct cam_devq *devq;
2537 
2538 devq = path->bus->sim->devq;
2539 mtx_lock(&devq->send_mtx);
2540 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
2541 if (xpt_schedule_devq(devq, path->device) != 0)
2542 xpt_run_devq(devq);
2543 mtx_unlock(&devq->send_mtx);
2544 break;
2545 }
2546 case XPT_CALC_GEOMETRY:
2547 /* Filter out garbage */
2548 if (start_ccb->ccg.block_size == 0
2549 || start_ccb->ccg.volume_size == 0) {
2550 start_ccb->ccg.cylinders = 0;
2551 start_ccb->ccg.heads = 0;
2552 start_ccb->ccg.secs_per_track = 0;
2553 start_ccb->ccb_h.status = CAM_REQ_CMP;
2554 break;
2555 }
2556 #if defined(PC98) || defined(__sparc64__)
2557 /*
2558 * In a PC-98 system, geometry translation depends on
2559 * the "real" device geometry obtained from mode page 4.
2560 * SCSI geometry translation is performed in the
2561 * initialization routine of the SCSI BIOS and the result
2562 * stored in host memory. If the translation is available
2563 * in host memory, use it. If not, rely on the default
2564 * translation the device driver performs.
2565 * For sparc64, we may need to adjust the geometry of large
2566 * disks in order to fit the limitations of the 16-bit
2567 * fields of the VTOC8 disk label.
2568 */
2569 if (scsi_da_bios_params(&start_ccb->ccg) != 0) {
2570 start_ccb->ccb_h.status = CAM_REQ_CMP;
2571 break;
2572 }
2573 #endif
2574 goto call_sim;
2575 case XPT_ABORT:
2576 {
2577 union ccb* abort_ccb;
2578 
2579 abort_ccb = start_ccb->cab.abort_ccb;
2580 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
2581 struct cam_ed *device;
2582 struct cam_devq *devq;
2583 
2584 device = abort_ccb->ccb_h.path->device;
2585 devq = device->sim->devq;
2586 
2587 mtx_lock(&devq->send_mtx);
2588 if (abort_ccb->ccb_h.pinfo.index > 0) {
2589 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb);
2590 abort_ccb->ccb_h.status =
2591 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
2592 xpt_freeze_devq_device(device, 1);
2593 mtx_unlock(&devq->send_mtx);
2594 xpt_done(abort_ccb);
2595 start_ccb->ccb_h.status = CAM_REQ_CMP;
2596 break;
2597 }
2598 mtx_unlock(&devq->send_mtx);
2599 
2600 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
2601 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
2602 /*
2603 * We've caught this ccb en route to
2604 * the SIM. Flag it for abort and the
2605 * SIM will do so just before starting
2606 * real work on the CCB.
2607 */ 2608 abort_ccb->ccb_h.status = 2609 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2610 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2611 start_ccb->ccb_h.status = CAM_REQ_CMP; 2612 break; 2613 } 2614 } 2615 if (XPT_FC_IS_QUEUED(abort_ccb) 2616 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2617 /* 2618 * It's already completed but waiting 2619 * for our SWI to get to it. 2620 */ 2621 start_ccb->ccb_h.status = CAM_UA_ABORT; 2622 break; 2623 } 2624 /* 2625 * If we weren't able to take care of the abort request 2626 * in the XPT, pass the request down to the SIM for processing. 2627 */ 2628 } 2629 /* FALLTHROUGH */ 2630 case XPT_ACCEPT_TARGET_IO: 2631 case XPT_EN_LUN: 2632 case XPT_IMMED_NOTIFY: 2633 case XPT_NOTIFY_ACK: 2634 case XPT_RESET_BUS: 2635 case XPT_IMMEDIATE_NOTIFY: 2636 case XPT_NOTIFY_ACKNOWLEDGE: 2637 case XPT_GET_SIM_KNOB_OLD: 2638 case XPT_GET_SIM_KNOB: 2639 case XPT_SET_SIM_KNOB: 2640 case XPT_GET_TRAN_SETTINGS: 2641 case XPT_SET_TRAN_SETTINGS: 2642 case XPT_PATH_INQ: 2643 call_sim: 2644 sim = path->bus->sim; 2645 lock = (mtx_owned(sim->mtx) == 0); 2646 if (lock) 2647 CAM_SIM_LOCK(sim); 2648 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2649 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2650 (*(sim->sim_action))(sim, start_ccb); 2651 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2652 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2653 if (lock) 2654 CAM_SIM_UNLOCK(sim); 2655 break; 2656 case XPT_PATH_STATS: 2657 start_ccb->cpis.last_reset = path->bus->last_reset; 2658 start_ccb->ccb_h.status = CAM_REQ_CMP; 2659 break; 2660 case XPT_GDEV_TYPE: 2661 { 2662 struct cam_ed *dev; 2663 2664 dev = path->device; 2665 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2666 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2667 } else { 2668 struct ccb_getdev *cgd; 2669 2670 cgd = &start_ccb->cgd; 2671 cgd->protocol = dev->protocol; 2672 cgd->inq_data = dev->inq_data; 2673 cgd->ident_data = dev->ident_data; 2674 cgd->inq_flags = dev->inq_flags; 2675 cgd->nvme_data = dev->nvme_data; 2676 cgd->nvme_cdata = dev->nvme_cdata; 2677 cgd->ccb_h.status = CAM_REQ_CMP; 2678 cgd->serial_num_len = dev->serial_num_len; 2679 if ((dev->serial_num_len > 0) 2680 && (dev->serial_num != NULL)) 2681 bcopy(dev->serial_num, cgd->serial_num, 2682 dev->serial_num_len); 2683 } 2684 break; 2685 } 2686 case XPT_GDEV_STATS: 2687 { 2688 struct cam_ed *dev; 2689 2690 dev = path->device; 2691 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2692 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2693 } else { 2694 struct ccb_getdevstats *cgds; 2695 struct cam_eb *bus; 2696 struct cam_et *tar; 2697 struct cam_devq *devq; 2698 2699 cgds = &start_ccb->cgds; 2700 bus = path->bus; 2701 tar = path->target; 2702 devq = bus->sim->devq; 2703 mtx_lock(&devq->send_mtx); 2704 cgds->dev_openings = dev->ccbq.dev_openings; 2705 cgds->dev_active = dev->ccbq.dev_active; 2706 cgds->allocated = dev->ccbq.allocated; 2707 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2708 cgds->held = cgds->allocated - cgds->dev_active - 2709 cgds->queued; 2710 cgds->last_reset = tar->last_reset; 2711 cgds->maxtags = dev->maxtags; 2712 cgds->mintags = dev->mintags; 2713 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2714 cgds->last_reset = bus->last_reset; 2715 mtx_unlock(&devq->send_mtx); 2716 cgds->ccb_h.status = CAM_REQ_CMP; 2717 } 2718 break; 2719 } 2720 case XPT_GDEVLIST: 2721 { 2722 struct cam_periph *nperiph; 2723 struct periph_list *periph_head; 2724 struct ccb_getdevlist *cgdl; 2725 u_int i; 2726 struct cam_ed *device; 2727 int found; 2728 
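/*
 * XPT_GDEVLIST hands back the peripherals attached to this device one
 * entry per call; cgdl->index selects the entry and cgdl->generation
 * is used to detect list changes between calls.
 */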
2729 2730 found = 0; 2731 2732 /* 2733 * Don't want anyone mucking with our data. 2734 */ 2735 device = path->device; 2736 periph_head = &device->periphs; 2737 cgdl = &start_ccb->cgdl; 2738 2739 /* 2740 * Check and see if the list has changed since the user 2741 * last requested a list member. If so, tell them that the 2742 * list has changed, and therefore they need to start over 2743 * from the beginning. 2744 */ 2745 if ((cgdl->index != 0) && 2746 (cgdl->generation != device->generation)) { 2747 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2748 break; 2749 } 2750 2751 /* 2752 * Traverse the list of peripherals and attempt to find 2753 * the requested peripheral. 2754 */ 2755 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2756 (nperiph != NULL) && (i <= cgdl->index); 2757 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2758 if (i == cgdl->index) { 2759 strncpy(cgdl->periph_name, 2760 nperiph->periph_name, 2761 DEV_IDLEN); 2762 cgdl->unit_number = nperiph->unit_number; 2763 found = 1; 2764 } 2765 } 2766 if (found == 0) { 2767 cgdl->status = CAM_GDEVLIST_ERROR; 2768 break; 2769 } 2770 2771 if (nperiph == NULL) 2772 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2773 else 2774 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2775 2776 cgdl->index++; 2777 cgdl->generation = device->generation; 2778 2779 cgdl->ccb_h.status = CAM_REQ_CMP; 2780 break; 2781 } 2782 case XPT_DEV_MATCH: 2783 { 2784 dev_pos_type position_type; 2785 struct ccb_dev_match *cdm; 2786 2787 cdm = &start_ccb->cdm; 2788 2789 /* 2790 * There are two ways of getting at information in the EDT. 2791 * The first way is via the primary EDT tree. It starts 2792 * with a list of busses, then a list of targets on a bus, 2793 * then devices/luns on a target, and then peripherals on a 2794 * device/lun. The "other" way is by the peripheral driver 2795 * lists. The peripheral driver lists are organized by 2796 * peripheral driver. (obviously) So it makes sense to 2797 * use the peripheral driver list if the user is looking 2798 * for something like "da1", or all "da" devices. If the 2799 * user is looking for something on a particular bus/target 2800 * or lun, it's generally better to go through the EDT tree. 2801 */ 2802 2803 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2804 position_type = cdm->pos.position_type; 2805 else { 2806 u_int i; 2807 2808 position_type = CAM_DEV_POS_NONE; 2809 2810 for (i = 0; i < cdm->num_patterns; i++) { 2811 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2812 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2813 position_type = CAM_DEV_POS_EDT; 2814 break; 2815 } 2816 } 2817 2818 if (cdm->num_patterns == 0) 2819 position_type = CAM_DEV_POS_EDT; 2820 else if (position_type == CAM_DEV_POS_NONE) 2821 position_type = CAM_DEV_POS_PDRV; 2822 } 2823 2824 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2825 case CAM_DEV_POS_EDT: 2826 xptedtmatch(cdm); 2827 break; 2828 case CAM_DEV_POS_PDRV: 2829 xptperiphlistmatch(cdm); 2830 break; 2831 default: 2832 cdm->status = CAM_DEV_MATCH_ERROR; 2833 break; 2834 } 2835 2836 if (cdm->status == CAM_DEV_MATCH_ERROR) 2837 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2838 else 2839 start_ccb->ccb_h.status = CAM_REQ_CMP; 2840 2841 break; 2842 } 2843 case XPT_SASYNC_CB: 2844 { 2845 struct ccb_setasync *csa; 2846 struct async_node *cur_entry; 2847 struct async_list *async_head; 2848 u_int32_t added; 2849 2850 csa = &start_ccb->csa; 2851 added = csa->event_enable; 2852 async_head = &path->device->asyncs; 2853 2854 /* 2855 * If there is already an entry for us, simply 2856 * update it. 
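 * If the request clears event_enable, the existing entry is removed
 * instead; when no entry exists yet, a new one is allocated and a
 * device reference is taken for it.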
2857 */ 2858 cur_entry = SLIST_FIRST(async_head); 2859 while (cur_entry != NULL) { 2860 if ((cur_entry->callback_arg == csa->callback_arg) 2861 && (cur_entry->callback == csa->callback)) 2862 break; 2863 cur_entry = SLIST_NEXT(cur_entry, links); 2864 } 2865 2866 if (cur_entry != NULL) { 2867 /* 2868 * If the request has no flags set, 2869 * remove the entry. 2870 */ 2871 added &= ~cur_entry->event_enable; 2872 if (csa->event_enable == 0) { 2873 SLIST_REMOVE(async_head, cur_entry, 2874 async_node, links); 2875 xpt_release_device(path->device); 2876 free(cur_entry, M_CAMXPT); 2877 } else { 2878 cur_entry->event_enable = csa->event_enable; 2879 } 2880 csa->event_enable = added; 2881 } else { 2882 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2883 M_NOWAIT); 2884 if (cur_entry == NULL) { 2885 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2886 break; 2887 } 2888 cur_entry->event_enable = csa->event_enable; 2889 cur_entry->event_lock = 2890 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2891 cur_entry->callback_arg = csa->callback_arg; 2892 cur_entry->callback = csa->callback; 2893 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2894 xpt_acquire_device(path->device); 2895 } 2896 start_ccb->ccb_h.status = CAM_REQ_CMP; 2897 break; 2898 } 2899 case XPT_REL_SIMQ: 2900 { 2901 struct ccb_relsim *crs; 2902 struct cam_ed *dev; 2903 2904 crs = &start_ccb->crs; 2905 dev = path->device; 2906 if (dev == NULL) { 2907 2908 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2909 break; 2910 } 2911 2912 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2913 2914 /* Don't ever go below one opening */ 2915 if (crs->openings > 0) { 2916 xpt_dev_ccbq_resize(path, crs->openings); 2917 if (bootverbose) { 2918 xpt_print(path, 2919 "number of openings is now %d\n", 2920 crs->openings); 2921 } 2922 } 2923 } 2924 2925 mtx_lock(&dev->sim->devq->send_mtx); 2926 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2927 2928 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2929 2930 /* 2931 * Just extend the old timeout and decrement 2932 * the freeze count so that a single timeout 2933 * is sufficient for releasing the queue. 2934 */ 2935 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2936 callout_stop(&dev->callout); 2937 } else { 2938 2939 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2940 } 2941 2942 callout_reset_sbt(&dev->callout, 2943 SBT_1MS * crs->release_timeout, 0, 2944 xpt_release_devq_timeout, dev, 0); 2945 2946 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2947 2948 } 2949 2950 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2951 2952 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2953 /* 2954 * Decrement the freeze count so that a single 2955 * completion is still sufficient to unfreeze 2956 * the queue. 
2957 */ 2958 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2959 } else { 2960 2961 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2962 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2963 } 2964 } 2965 2966 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2967 2968 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2969 || (dev->ccbq.dev_active == 0)) { 2970 2971 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2972 } else { 2973 2974 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2975 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2976 } 2977 } 2978 mtx_unlock(&dev->sim->devq->send_mtx); 2979 2980 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2981 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2982 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2983 start_ccb->ccb_h.status = CAM_REQ_CMP; 2984 break; 2985 } 2986 case XPT_DEBUG: { 2987 struct cam_path *oldpath; 2988 2989 /* Check that all request bits are supported. */ 2990 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 2991 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 2992 break; 2993 } 2994 2995 cam_dflags = CAM_DEBUG_NONE; 2996 if (cam_dpath != NULL) { 2997 oldpath = cam_dpath; 2998 cam_dpath = NULL; 2999 xpt_free_path(oldpath); 3000 } 3001 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3002 if (xpt_create_path(&cam_dpath, NULL, 3003 start_ccb->ccb_h.path_id, 3004 start_ccb->ccb_h.target_id, 3005 start_ccb->ccb_h.target_lun) != 3006 CAM_REQ_CMP) { 3007 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3008 } else { 3009 cam_dflags = start_ccb->cdbg.flags; 3010 start_ccb->ccb_h.status = CAM_REQ_CMP; 3011 xpt_print(cam_dpath, "debugging flags now %x\n", 3012 cam_dflags); 3013 } 3014 } else 3015 start_ccb->ccb_h.status = CAM_REQ_CMP; 3016 break; 3017 } 3018 case XPT_NOOP: 3019 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3020 xpt_freeze_devq(path, 1); 3021 start_ccb->ccb_h.status = CAM_REQ_CMP; 3022 break; 3023 case XPT_REPROBE_LUN: 3024 xpt_async(AC_INQ_CHANGED, path, NULL); 3025 start_ccb->ccb_h.status = CAM_REQ_CMP; 3026 xpt_done(start_ccb); 3027 break; 3028 default: 3029 case XPT_SDEV_TYPE: 3030 case XPT_TERM_IO: 3031 case XPT_ENG_INQ: 3032 /* XXX Implement */ 3033 xpt_print_path(start_ccb->ccb_h.path); 3034 printf("%s: CCB type %#x %s not supported\n", __func__, 3035 start_ccb->ccb_h.func_code, 3036 xpt_action_name(start_ccb->ccb_h.func_code)); 3037 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3038 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3039 xpt_done(start_ccb); 3040 } 3041 break; 3042 } 3043 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3044 ("xpt_action_default: func= %#x %s status %#x\n", 3045 start_ccb->ccb_h.func_code, 3046 xpt_action_name(start_ccb->ccb_h.func_code), 3047 start_ccb->ccb_h.status)); 3048 } 3049 3050 void 3051 xpt_polled_action(union ccb *start_ccb) 3052 { 3053 u_int32_t timeout; 3054 struct cam_sim *sim; 3055 struct cam_devq *devq; 3056 struct cam_ed *dev; 3057 3058 timeout = start_ccb->ccb_h.timeout * 10; 3059 sim = start_ccb->ccb_h.path->bus->sim; 3060 devq = sim->devq; 3061 dev = start_ccb->ccb_h.path->device; 3062 3063 mtx_unlock(&dev->device_mtx); 3064 3065 /* 3066 * Steal an opening so that no other queued requests 3067 * can get it before us while we simulate interrupts. 
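 * The opening is given back once one becomes free (or the timeout
 * expires), just before the CCB is actually issued below.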
3068 */ 3069 mtx_lock(&devq->send_mtx); 3070 dev->ccbq.dev_openings--; 3071 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3072 (--timeout > 0)) { 3073 mtx_unlock(&devq->send_mtx); 3074 DELAY(100); 3075 CAM_SIM_LOCK(sim); 3076 (*(sim->sim_poll))(sim); 3077 CAM_SIM_UNLOCK(sim); 3078 camisr_runqueue(); 3079 mtx_lock(&devq->send_mtx); 3080 } 3081 dev->ccbq.dev_openings++; 3082 mtx_unlock(&devq->send_mtx); 3083 3084 if (timeout != 0) { 3085 xpt_action(start_ccb); 3086 while(--timeout > 0) { 3087 CAM_SIM_LOCK(sim); 3088 (*(sim->sim_poll))(sim); 3089 CAM_SIM_UNLOCK(sim); 3090 camisr_runqueue(); 3091 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3092 != CAM_REQ_INPROG) 3093 break; 3094 DELAY(100); 3095 } 3096 if (timeout == 0) { 3097 /* 3098 * XXX Is it worth adding a sim_timeout entry 3099 * point so we can attempt recovery? If 3100 * this is only used for dumps, I don't think 3101 * it is. 3102 */ 3103 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3104 } 3105 } else { 3106 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3107 } 3108 3109 mtx_lock(&dev->device_mtx); 3110 } 3111 3112 /* 3113 * Schedule a peripheral driver to receive a ccb when its 3114 * target device has space for more transactions. 3115 */ 3116 void 3117 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3118 { 3119 3120 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3121 cam_periph_assert(periph, MA_OWNED); 3122 if (new_priority < periph->scheduled_priority) { 3123 periph->scheduled_priority = new_priority; 3124 xpt_run_allocq(periph, 0); 3125 } 3126 } 3127 3128 3129 /* 3130 * Schedule a device to run on a given queue. 3131 * If the device was inserted as a new entry on the queue, 3132 * return 1 meaning the device queue should be run. If we 3133 * were already queued, implying someone else has already 3134 * started the queue, return 0 so the caller doesn't attempt 3135 * to run the queue. 3136 */ 3137 static int 3138 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3139 u_int32_t new_priority) 3140 { 3141 int retval; 3142 u_int32_t old_priority; 3143 3144 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3145 3146 old_priority = pinfo->priority; 3147 3148 /* 3149 * Are we already queued? 
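 * If so, a priority upgrade only requires reordering the existing
 * entry; otherwise we insert a new entry and tell the caller to run
 * the queue.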
3150 */ 3151 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3152 /* Simply reorder based on new priority */ 3153 if (new_priority < old_priority) { 3154 camq_change_priority(queue, pinfo->index, 3155 new_priority); 3156 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3157 ("changed priority to %d\n", 3158 new_priority)); 3159 retval = 1; 3160 } else 3161 retval = 0; 3162 } else { 3163 /* New entry on the queue */ 3164 if (new_priority < old_priority) 3165 pinfo->priority = new_priority; 3166 3167 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3168 ("Inserting onto queue\n")); 3169 pinfo->generation = ++queue->generation; 3170 camq_insert(queue, pinfo); 3171 retval = 1; 3172 } 3173 return (retval); 3174 } 3175 3176 static void 3177 xpt_run_allocq_task(void *context, int pending) 3178 { 3179 struct cam_periph *periph = context; 3180 3181 cam_periph_lock(periph); 3182 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3183 xpt_run_allocq(periph, 1); 3184 cam_periph_unlock(periph); 3185 cam_periph_release(periph); 3186 } 3187 3188 static void 3189 xpt_run_allocq(struct cam_periph *periph, int sleep) 3190 { 3191 struct cam_ed *device; 3192 union ccb *ccb; 3193 uint32_t prio; 3194 3195 cam_periph_assert(periph, MA_OWNED); 3196 if (periph->periph_allocating) 3197 return; 3198 periph->periph_allocating = 1; 3199 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3200 device = periph->path->device; 3201 ccb = NULL; 3202 restart: 3203 while ((prio = min(periph->scheduled_priority, 3204 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3205 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3206 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3207 3208 if (ccb == NULL && 3209 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3210 if (sleep) { 3211 ccb = xpt_get_ccb(periph); 3212 goto restart; 3213 } 3214 if (periph->flags & CAM_PERIPH_RUN_TASK) 3215 break; 3216 cam_periph_doacquire(periph); 3217 periph->flags |= CAM_PERIPH_RUN_TASK; 3218 taskqueue_enqueue(xsoftc.xpt_taskq, 3219 &periph->periph_run_task); 3220 break; 3221 } 3222 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3223 if (prio == periph->immediate_priority) { 3224 periph->immediate_priority = CAM_PRIORITY_NONE; 3225 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3226 ("waking cam_periph_getccb()\n")); 3227 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3228 periph_links.sle); 3229 wakeup(&periph->ccb_list); 3230 } else { 3231 periph->scheduled_priority = CAM_PRIORITY_NONE; 3232 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3233 ("calling periph_start()\n")); 3234 periph->periph_start(periph, ccb); 3235 } 3236 ccb = NULL; 3237 } 3238 if (ccb != NULL) 3239 xpt_release_ccb(ccb); 3240 periph->periph_allocating = 0; 3241 } 3242 3243 static void 3244 xpt_run_devq(struct cam_devq *devq) 3245 { 3246 int lock; 3247 3248 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3249 3250 devq->send_queue.qfrozen_cnt++; 3251 while ((devq->send_queue.entries > 0) 3252 && (devq->send_openings > 0) 3253 && (devq->send_queue.qfrozen_cnt <= 1)) { 3254 struct cam_ed *device; 3255 union ccb *work_ccb; 3256 struct cam_sim *sim; 3257 struct xpt_proto *proto; 3258 3259 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3260 CAMQ_HEAD); 3261 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3262 ("running device %p\n", device)); 3263 3264 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3265 if (work_ccb == NULL) { 3266 printf("device on run queue with no ccbs???\n"); 3267 continue; 3268 } 3269 3270 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3271 3272 mtx_lock(&xsoftc.xpt_highpower_lock); 3273 if 
(xsoftc.num_highpower <= 0) {
3274 /*
3275 * We got a high power command, but we
3276 * don't have any available slots. Freeze
3277 * the device queue until we have a slot
3278 * available.
3279 */
3280 xpt_freeze_devq_device(device, 1);
3281 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device,
3282 highpowerq_entry);
3283 
3284 mtx_unlock(&xsoftc.xpt_highpower_lock);
3285 continue;
3286 } else {
3287 /*
3288 * Consume a high power slot while
3289 * this ccb runs.
3290 */
3291 xsoftc.num_highpower--;
3292 }
3293 mtx_unlock(&xsoftc.xpt_highpower_lock);
3294 }
3295 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3296 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3297 devq->send_openings--;
3298 devq->send_active++;
3299 xpt_schedule_devq(devq, device);
3300 mtx_unlock(&devq->send_mtx);
3301 
3302 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) {
3303 /*
3304 * The client wants to freeze the queue
3305 * after this CCB is sent.
3306 */
3307 xpt_freeze_devq(work_ccb->ccb_h.path, 1);
3308 }
3309 
3310 /* In Target mode, the peripheral driver knows best... */
3311 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3312 if ((device->inq_flags & SID_CmdQue) != 0
3313 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3314 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3315 else
3316 /*
3317 * Clear this in case of a retried CCB that
3318 * failed due to a rejected tag.
3319 */
3320 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3321 }
3322 
3323 KASSERT(device == work_ccb->ccb_h.path->device,
3324 ("device (%p) / path->device (%p) mismatch",
3325 device, work_ccb->ccb_h.path->device));
3326 proto = xpt_proto_find(device->protocol);
3327 if (proto && proto->ops->debug_out)
3328 proto->ops->debug_out(work_ccb);
3329 
3330 /*
3331 * Device queues can be shared among multiple SIM instances
3332 * that reside on different busses. Use the SIM from the
3333 * queued device, rather than the one from the calling bus.
3334 */
3335 sim = device->sim;
3336 lock = (mtx_owned(sim->mtx) == 0);
3337 if (lock)
3338 CAM_SIM_LOCK(sim);
3339 work_ccb->ccb_h.qos.sim_data = sbinuptime(); /* XXX uintptr_t too small on 32-bit platforms */
3340 (*(sim->sim_action))(sim, work_ccb);
3341 if (lock)
3342 CAM_SIM_UNLOCK(sim);
3343 mtx_lock(&devq->send_mtx);
3344 }
3345 devq->send_queue.qfrozen_cnt--;
3346 }
3347 
3348 /*
3349 * This function merges fields from the slave ccb into the master ccb, while
3350 * keeping important fields in the master ccb constant.
3351 */
3352 void
3353 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3354 {
3355 
3356 /*
3357 * Pull fields that are valid for peripheral drivers to set
3358 * into the master CCB along with the CCB "payload".
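 * Everything past the header is copied wholesale, while the header
 * itself keeps the master's path and queue bookkeeping.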
3359 */ 3360 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3361 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3362 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3363 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3364 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3365 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3366 } 3367 3368 void 3369 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3370 u_int32_t priority, u_int32_t flags) 3371 { 3372 3373 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3374 ccb_h->pinfo.priority = priority; 3375 ccb_h->path = path; 3376 ccb_h->path_id = path->bus->path_id; 3377 if (path->target) 3378 ccb_h->target_id = path->target->target_id; 3379 else 3380 ccb_h->target_id = CAM_TARGET_WILDCARD; 3381 if (path->device) { 3382 ccb_h->target_lun = path->device->lun_id; 3383 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3384 } else { 3385 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3386 } 3387 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3388 ccb_h->flags = flags; 3389 ccb_h->xflags = 0; 3390 } 3391 3392 void 3393 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3394 { 3395 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3396 } 3397 3398 /* Path manipulation functions */ 3399 cam_status 3400 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3401 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3402 { 3403 struct cam_path *path; 3404 cam_status status; 3405 3406 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3407 3408 if (path == NULL) { 3409 status = CAM_RESRC_UNAVAIL; 3410 return(status); 3411 } 3412 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3413 if (status != CAM_REQ_CMP) { 3414 free(path, M_CAMPATH); 3415 path = NULL; 3416 } 3417 *new_path_ptr = path; 3418 return (status); 3419 } 3420 3421 cam_status 3422 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3423 struct cam_periph *periph, path_id_t path_id, 3424 target_id_t target_id, lun_id_t lun_id) 3425 { 3426 3427 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3428 lun_id)); 3429 } 3430 3431 cam_status 3432 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3433 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3434 { 3435 struct cam_eb *bus; 3436 struct cam_et *target; 3437 struct cam_ed *device; 3438 cam_status status; 3439 3440 status = CAM_REQ_CMP; /* Completed without error */ 3441 target = NULL; /* Wildcarded */ 3442 device = NULL; /* Wildcarded */ 3443 3444 /* 3445 * We will potentially modify the EDT, so block interrupts 3446 * that may attempt to create cam paths. 
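 * (With the current locking scheme this amounts to holding the bus
 * list lock and the per-bus mutex around the lookups and allocations
 * below.)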
3447 */ 3448 bus = xpt_find_bus(path_id); 3449 if (bus == NULL) { 3450 status = CAM_PATH_INVALID; 3451 } else { 3452 xpt_lock_buses(); 3453 mtx_lock(&bus->eb_mtx); 3454 target = xpt_find_target(bus, target_id); 3455 if (target == NULL) { 3456 /* Create one */ 3457 struct cam_et *new_target; 3458 3459 new_target = xpt_alloc_target(bus, target_id); 3460 if (new_target == NULL) { 3461 status = CAM_RESRC_UNAVAIL; 3462 } else { 3463 target = new_target; 3464 } 3465 } 3466 xpt_unlock_buses(); 3467 if (target != NULL) { 3468 device = xpt_find_device(target, lun_id); 3469 if (device == NULL) { 3470 /* Create one */ 3471 struct cam_ed *new_device; 3472 3473 new_device = 3474 (*(bus->xport->ops->alloc_device))(bus, 3475 target, 3476 lun_id); 3477 if (new_device == NULL) { 3478 status = CAM_RESRC_UNAVAIL; 3479 } else { 3480 device = new_device; 3481 } 3482 } 3483 } 3484 mtx_unlock(&bus->eb_mtx); 3485 } 3486 3487 /* 3488 * Only touch the user's data if we are successful. 3489 */ 3490 if (status == CAM_REQ_CMP) { 3491 new_path->periph = perph; 3492 new_path->bus = bus; 3493 new_path->target = target; 3494 new_path->device = device; 3495 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3496 } else { 3497 if (device != NULL) 3498 xpt_release_device(device); 3499 if (target != NULL) 3500 xpt_release_target(target); 3501 if (bus != NULL) 3502 xpt_release_bus(bus); 3503 } 3504 return (status); 3505 } 3506 3507 cam_status 3508 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3509 { 3510 struct cam_path *new_path; 3511 3512 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3513 if (new_path == NULL) 3514 return(CAM_RESRC_UNAVAIL); 3515 xpt_copy_path(new_path, path); 3516 *new_path_ptr = new_path; 3517 return (CAM_REQ_CMP); 3518 } 3519 3520 void 3521 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3522 { 3523 3524 *new_path = *path; 3525 if (path->bus != NULL) 3526 xpt_acquire_bus(path->bus); 3527 if (path->target != NULL) 3528 xpt_acquire_target(path->target); 3529 if (path->device != NULL) 3530 xpt_acquire_device(path->device); 3531 } 3532 3533 void 3534 xpt_release_path(struct cam_path *path) 3535 { 3536 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3537 if (path->device != NULL) { 3538 xpt_release_device(path->device); 3539 path->device = NULL; 3540 } 3541 if (path->target != NULL) { 3542 xpt_release_target(path->target); 3543 path->target = NULL; 3544 } 3545 if (path->bus != NULL) { 3546 xpt_release_bus(path->bus); 3547 path->bus = NULL; 3548 } 3549 } 3550 3551 void 3552 xpt_free_path(struct cam_path *path) 3553 { 3554 3555 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3556 xpt_release_path(path); 3557 free(path, M_CAMPATH); 3558 } 3559 3560 void 3561 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3562 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3563 { 3564 3565 xpt_lock_buses(); 3566 if (bus_ref) { 3567 if (path->bus) 3568 *bus_ref = path->bus->refcount; 3569 else 3570 *bus_ref = 0; 3571 } 3572 if (periph_ref) { 3573 if (path->periph) 3574 *periph_ref = path->periph->refcount; 3575 else 3576 *periph_ref = 0; 3577 } 3578 xpt_unlock_buses(); 3579 if (target_ref) { 3580 if (path->target) 3581 *target_ref = path->target->refcount; 3582 else 3583 *target_ref = 0; 3584 } 3585 if (device_ref) { 3586 if (path->device) 3587 *device_ref = path->device->refcount; 3588 else 3589 *device_ref = 0; 3590 } 3591 } 3592 3593 /* 3594 * Return -1 for failure, 0 for exact match, 1 for match with 
wildcards 3595 * in path1, 2 for match with wildcards in path2. 3596 */ 3597 int 3598 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3599 { 3600 int retval = 0; 3601 3602 if (path1->bus != path2->bus) { 3603 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3604 retval = 1; 3605 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3606 retval = 2; 3607 else 3608 return (-1); 3609 } 3610 if (path1->target != path2->target) { 3611 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3612 if (retval == 0) 3613 retval = 1; 3614 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3615 retval = 2; 3616 else 3617 return (-1); 3618 } 3619 if (path1->device != path2->device) { 3620 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3621 if (retval == 0) 3622 retval = 1; 3623 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3624 retval = 2; 3625 else 3626 return (-1); 3627 } 3628 return (retval); 3629 } 3630 3631 int 3632 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3633 { 3634 int retval = 0; 3635 3636 if (path->bus != dev->target->bus) { 3637 if (path->bus->path_id == CAM_BUS_WILDCARD) 3638 retval = 1; 3639 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3640 retval = 2; 3641 else 3642 return (-1); 3643 } 3644 if (path->target != dev->target) { 3645 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3646 if (retval == 0) 3647 retval = 1; 3648 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3649 retval = 2; 3650 else 3651 return (-1); 3652 } 3653 if (path->device != dev) { 3654 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3655 if (retval == 0) 3656 retval = 1; 3657 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3658 retval = 2; 3659 else 3660 return (-1); 3661 } 3662 return (retval); 3663 } 3664 3665 void 3666 xpt_print_path(struct cam_path *path) 3667 { 3668 3669 if (path == NULL) 3670 printf("(nopath): "); 3671 else { 3672 if (path->periph != NULL) 3673 printf("(%s%d:", path->periph->periph_name, 3674 path->periph->unit_number); 3675 else 3676 printf("(noperiph:"); 3677 3678 if (path->bus != NULL) 3679 printf("%s%d:%d:", path->bus->sim->sim_name, 3680 path->bus->sim->unit_number, 3681 path->bus->sim->bus_id); 3682 else 3683 printf("nobus:"); 3684 3685 if (path->target != NULL) 3686 printf("%d:", path->target->target_id); 3687 else 3688 printf("X:"); 3689 3690 if (path->device != NULL) 3691 printf("%jx): ", (uintmax_t)path->device->lun_id); 3692 else 3693 printf("X): "); 3694 } 3695 } 3696 3697 void 3698 xpt_print_device(struct cam_ed *device) 3699 { 3700 3701 if (device == NULL) 3702 printf("(nopath): "); 3703 else { 3704 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3705 device->sim->unit_number, 3706 device->sim->bus_id, 3707 device->target->target_id, 3708 (uintmax_t)device->lun_id); 3709 } 3710 } 3711 3712 void 3713 xpt_print(struct cam_path *path, const char *fmt, ...) 
3714 {
3715 va_list ap;
3716 xpt_print_path(path);
3717 va_start(ap, fmt);
3718 vprintf(fmt, ap);
3719 va_end(ap);
3720 }
3721 
3722 int
3723 xpt_path_string(struct cam_path *path, char *str, size_t str_len)
3724 {
3725 struct sbuf sb;
3726 
3727 sbuf_new(&sb, str, str_len, 0);
3728 
3729 if (path == NULL)
3730 sbuf_printf(&sb, "(nopath): ");
3731 else {
3732 if (path->periph != NULL)
3733 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
3734 path->periph->unit_number);
3735 else
3736 sbuf_printf(&sb, "(noperiph:");
3737 
3738 if (path->bus != NULL)
3739 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
3740 path->bus->sim->unit_number,
3741 path->bus->sim->bus_id);
3742 else
3743 sbuf_printf(&sb, "nobus:");
3744 
3745 if (path->target != NULL)
3746 sbuf_printf(&sb, "%d:", path->target->target_id);
3747 else
3748 sbuf_printf(&sb, "X:");
3749 
3750 if (path->device != NULL)
3751 sbuf_printf(&sb, "%jx): ",
3752 (uintmax_t)path->device->lun_id);
3753 else
3754 sbuf_printf(&sb, "X): ");
3755 }
3756 sbuf_finish(&sb);
3757 
3758 return(sbuf_len(&sb));
3759 }
3760 
3761 path_id_t
3762 xpt_path_path_id(struct cam_path *path)
3763 {
3764 return(path->bus->path_id);
3765 }
3766 
3767 target_id_t
3768 xpt_path_target_id(struct cam_path *path)
3769 {
3770 if (path->target != NULL)
3771 return (path->target->target_id);
3772 else
3773 return (CAM_TARGET_WILDCARD);
3774 }
3775 
3776 lun_id_t
3777 xpt_path_lun_id(struct cam_path *path)
3778 {
3779 if (path->device != NULL)
3780 return (path->device->lun_id);
3781 else
3782 return (CAM_LUN_WILDCARD);
3783 }
3784 
3785 struct cam_sim *
3786 xpt_path_sim(struct cam_path *path)
3787 {
3788 
3789 return (path->bus->sim);
3790 }
3791 
3792 struct cam_periph*
3793 xpt_path_periph(struct cam_path *path)
3794 {
3795 
3796 return (path->periph);
3797 }
3798 
3799 /*
3800 * Release a CAM control block for the caller. Remit the cost of the structure
3801 * to the device referenced by the path. If this device had no 'credits'
3802 * and peripheral drivers have registered async callbacks for this notification,
3803 * call them now.
3804 */
3805 void
3806 xpt_release_ccb(union ccb *free_ccb)
3807 {
3808 struct cam_ed *device;
3809 struct cam_periph *periph;
3810 
3811 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
3812 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED);
3813 device = free_ccb->ccb_h.path->device;
3814 periph = free_ccb->ccb_h.path->periph;
3815 
3816 xpt_free_ccb(free_ccb);
3817 periph->periph_allocated--;
3818 cam_ccbq_release_opening(&device->ccbq);
3819 xpt_run_allocq(periph, 0);
3820 }
3821 
3822 /* Functions accessed by SIM drivers */
3823 
3824 static struct xpt_xport_ops xport_default_ops = {
3825 .alloc_device = xpt_alloc_device_default,
3826 .action = xpt_action_default,
3827 .async = xpt_dev_async_default,
3828 };
3829 static struct xpt_xport xport_default = {
3830 .xport = XPORT_UNKNOWN,
3831 .name = "unknown",
3832 .ops = &xport_default_ops,
3833 };
3834 
3835 CAM_XPT_XPORT(xport_default);
3836 
3837 /*
3838 * A sim structure, listing the SIM entry points and instance
3839 * identification info, is passed to xpt_bus_register to hook the SIM
3840 * into the CAM framework. xpt_bus_register creates a cam_eb entry
3841 * for this new bus and places it in the array of busses and assigns
3842 * it a path_id. The path_id may be influenced by "hard wiring"
3843 * information specified by the user. Once interrupt services are
3844 * available, the bus will be probed.
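 * The caller must hold the SIM lock across the call (asserted below);
 * CAM_SUCCESS is returned on success and CAM_RESRC_UNAVAIL on failure.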
3845 */ 3846 int32_t 3847 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3848 { 3849 struct cam_eb *new_bus; 3850 struct cam_eb *old_bus; 3851 struct ccb_pathinq cpi; 3852 struct cam_path *path; 3853 cam_status status; 3854 3855 mtx_assert(sim->mtx, MA_OWNED); 3856 3857 sim->bus_id = bus; 3858 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3859 M_CAMXPT, M_NOWAIT|M_ZERO); 3860 if (new_bus == NULL) { 3861 /* Couldn't satisfy request */ 3862 return (CAM_RESRC_UNAVAIL); 3863 } 3864 3865 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3866 TAILQ_INIT(&new_bus->et_entries); 3867 cam_sim_hold(sim); 3868 new_bus->sim = sim; 3869 timevalclear(&new_bus->last_reset); 3870 new_bus->flags = 0; 3871 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3872 new_bus->generation = 0; 3873 3874 xpt_lock_buses(); 3875 sim->path_id = new_bus->path_id = 3876 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3877 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3878 while (old_bus != NULL 3879 && old_bus->path_id < new_bus->path_id) 3880 old_bus = TAILQ_NEXT(old_bus, links); 3881 if (old_bus != NULL) 3882 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3883 else 3884 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3885 xsoftc.bus_generation++; 3886 xpt_unlock_buses(); 3887 3888 /* 3889 * Set a default transport so that a PATH_INQ can be issued to 3890 * the SIM. This will then allow for probing and attaching of 3891 * a more appropriate transport. 3892 */ 3893 new_bus->xport = &xport_default; 3894 3895 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3896 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3897 if (status != CAM_REQ_CMP) { 3898 xpt_release_bus(new_bus); 3899 free(path, M_CAMXPT); 3900 return (CAM_RESRC_UNAVAIL); 3901 } 3902 3903 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3904 cpi.ccb_h.func_code = XPT_PATH_INQ; 3905 xpt_action((union ccb *)&cpi); 3906 3907 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3908 struct xpt_xport **xpt; 3909 3910 SET_FOREACH(xpt, cam_xpt_xport_set) { 3911 if ((*xpt)->xport == cpi.transport) { 3912 new_bus->xport = *xpt; 3913 break; 3914 } 3915 } 3916 if (new_bus->xport == NULL) { 3917 xpt_print_path(path); 3918 printf("No transport found for %d\n", cpi.transport); 3919 xpt_release_bus(new_bus); 3920 free(path, M_CAMXPT); 3921 return (CAM_RESRC_UNAVAIL); 3922 } 3923 } 3924 3925 /* Notify interested parties */ 3926 if (sim->path_id != CAM_XPT_PATH_ID) { 3927 3928 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3929 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3930 union ccb *scan_ccb; 3931 3932 /* Initiate bus rescan. */ 3933 scan_ccb = xpt_alloc_ccb_nowait(); 3934 if (scan_ccb != NULL) { 3935 scan_ccb->ccb_h.path = path; 3936 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3937 scan_ccb->crcn.flags = 0; 3938 xpt_rescan(scan_ccb); 3939 } else { 3940 xpt_print(path, 3941 "Can't allocate CCB to scan bus\n"); 3942 xpt_free_path(path); 3943 } 3944 } else 3945 xpt_free_path(path); 3946 } else 3947 xpt_free_path(path); 3948 return (CAM_SUCCESS); 3949 } 3950 3951 int32_t 3952 xpt_bus_deregister(path_id_t pathid) 3953 { 3954 struct cam_path bus_path; 3955 cam_status status; 3956 3957 status = xpt_compile_path(&bus_path, NULL, pathid, 3958 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3959 if (status != CAM_REQ_CMP) 3960 return (status); 3961 3962 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3963 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3964 3965 /* Release the reference count held while registered. 
*/ 3966 xpt_release_bus(bus_path.bus); 3967 xpt_release_path(&bus_path); 3968 3969 return (CAM_REQ_CMP); 3970 } 3971 3972 static path_id_t 3973 xptnextfreepathid(void) 3974 { 3975 struct cam_eb *bus; 3976 path_id_t pathid; 3977 const char *strval; 3978 3979 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 3980 pathid = 0; 3981 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3982 retry: 3983 /* Find an unoccupied pathid */ 3984 while (bus != NULL && bus->path_id <= pathid) { 3985 if (bus->path_id == pathid) 3986 pathid++; 3987 bus = TAILQ_NEXT(bus, links); 3988 } 3989 3990 /* 3991 * Ensure that this pathid is not reserved for 3992 * a bus that may be registered in the future. 3993 */ 3994 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 3995 ++pathid; 3996 /* Start the search over */ 3997 goto retry; 3998 } 3999 return (pathid); 4000 } 4001 4002 static path_id_t 4003 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4004 { 4005 path_id_t pathid; 4006 int i, dunit, val; 4007 char buf[32]; 4008 const char *dname; 4009 4010 pathid = CAM_XPT_PATH_ID; 4011 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4012 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4013 return (pathid); 4014 i = 0; 4015 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4016 if (strcmp(dname, "scbus")) { 4017 /* Avoid a bit of foot shooting. */ 4018 continue; 4019 } 4020 if (dunit < 0) /* unwired?! */ 4021 continue; 4022 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4023 if (sim_bus == val) { 4024 pathid = dunit; 4025 break; 4026 } 4027 } else if (sim_bus == 0) { 4028 /* Unspecified matches bus 0 */ 4029 pathid = dunit; 4030 break; 4031 } else { 4032 printf("Ambiguous scbus configuration for %s%d " 4033 "bus %d, cannot wire down. The kernel " 4034 "config entry for scbus%d should " 4035 "specify a controller bus.\n" 4036 "Scbus will be assigned dynamically.\n", 4037 sim_name, sim_unit, sim_bus, dunit); 4038 break; 4039 } 4040 } 4041 4042 if (pathid == CAM_XPT_PATH_ID) 4043 pathid = xptnextfreepathid(); 4044 return (pathid); 4045 } 4046 4047 static const char * 4048 xpt_async_string(u_int32_t async_code) 4049 { 4050 4051 switch (async_code) { 4052 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4053 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4054 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4055 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4056 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4057 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4058 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4059 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4060 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4061 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4062 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4063 case AC_CONTRACT: return ("AC_CONTRACT"); 4064 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4065 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4066 } 4067 return ("AC_UNKNOWN"); 4068 } 4069 4070 static int 4071 xpt_async_size(u_int32_t async_code) 4072 { 4073 4074 switch (async_code) { 4075 case AC_BUS_RESET: return (0); 4076 case AC_UNSOL_RESEL: return (0); 4077 case AC_SCSI_AEN: return (0); 4078 case AC_SENT_BDR: return (0); 4079 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4080 case AC_PATH_DEREGISTERED: return (0); 4081 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4082 case AC_LOST_DEVICE: return (0); 4083 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4084 case 
AC_INQ_CHANGED: return (0); 4085 case AC_GETDEV_CHANGED: return (0); 4086 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4087 case AC_ADVINFO_CHANGED: return (-1); 4088 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4089 } 4090 return (0); 4091 } 4092 4093 static int 4094 xpt_async_process_dev(struct cam_ed *device, void *arg) 4095 { 4096 union ccb *ccb = arg; 4097 struct cam_path *path = ccb->ccb_h.path; 4098 void *async_arg = ccb->casync.async_arg_ptr; 4099 u_int32_t async_code = ccb->casync.async_code; 4100 int relock; 4101 4102 if (path->device != device 4103 && path->device->lun_id != CAM_LUN_WILDCARD 4104 && device->lun_id != CAM_LUN_WILDCARD) 4105 return (1); 4106 4107 /* 4108 * The async callback could free the device. 4109 * If it is a broadcast async, it doesn't hold 4110 * device reference, so take our own reference. 4111 */ 4112 xpt_acquire_device(device); 4113 4114 /* 4115 * If async for specific device is to be delivered to 4116 * the wildcard client, take the specific device lock. 4117 * XXX: We may need a way for client to specify it. 4118 */ 4119 if ((device->lun_id == CAM_LUN_WILDCARD && 4120 path->device->lun_id != CAM_LUN_WILDCARD) || 4121 (device->target->target_id == CAM_TARGET_WILDCARD && 4122 path->target->target_id != CAM_TARGET_WILDCARD) || 4123 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4124 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4125 mtx_unlock(&device->device_mtx); 4126 xpt_path_lock(path); 4127 relock = 1; 4128 } else 4129 relock = 0; 4130 4131 (*(device->target->bus->xport->ops->async))(async_code, 4132 device->target->bus, device->target, device, async_arg); 4133 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4134 4135 if (relock) { 4136 xpt_path_unlock(path); 4137 mtx_lock(&device->device_mtx); 4138 } 4139 xpt_release_device(device); 4140 return (1); 4141 } 4142 4143 static int 4144 xpt_async_process_tgt(struct cam_et *target, void *arg) 4145 { 4146 union ccb *ccb = arg; 4147 struct cam_path *path = ccb->ccb_h.path; 4148 4149 if (path->target != target 4150 && path->target->target_id != CAM_TARGET_WILDCARD 4151 && target->target_id != CAM_TARGET_WILDCARD) 4152 return (1); 4153 4154 if (ccb->casync.async_code == AC_SENT_BDR) { 4155 /* Update our notion of when the last reset occurred */ 4156 microtime(&target->last_reset); 4157 } 4158 4159 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4160 } 4161 4162 static void 4163 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4164 { 4165 struct cam_eb *bus; 4166 struct cam_path *path; 4167 void *async_arg; 4168 u_int32_t async_code; 4169 4170 path = ccb->ccb_h.path; 4171 async_code = ccb->casync.async_code; 4172 async_arg = ccb->casync.async_arg_ptr; 4173 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4174 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4175 bus = path->bus; 4176 4177 if (async_code == AC_BUS_RESET) { 4178 /* Update our notion of when the last reset occurred */ 4179 microtime(&bus->last_reset); 4180 } 4181 4182 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4183 4184 /* 4185 * If this wasn't a fully wildcarded async, tell all 4186 * clients that want all async events. 
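	 * Such clients (those that registered with a NULL path via
	 * xpt_register_async()) have their callbacks attached to the
	 * wildcard device on the XPT bus, i.e. xpt_periph's own path,
	 * so replay the event against that device unless this async
	 * already originated on the XPT bus.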
4187 */ 4188 if (bus != xpt_periph->path->bus) { 4189 xpt_path_lock(xpt_periph->path); 4190 xpt_async_process_dev(xpt_periph->path->device, ccb); 4191 xpt_path_unlock(xpt_periph->path); 4192 } 4193 4194 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4195 xpt_release_devq(path, 1, TRUE); 4196 else 4197 xpt_release_simq(path->bus->sim, TRUE); 4198 if (ccb->casync.async_arg_size > 0) 4199 free(async_arg, M_CAMXPT); 4200 xpt_free_path(path); 4201 xpt_free_ccb(ccb); 4202 } 4203 4204 static void 4205 xpt_async_bcast(struct async_list *async_head, 4206 u_int32_t async_code, 4207 struct cam_path *path, void *async_arg) 4208 { 4209 struct async_node *cur_entry; 4210 int lock; 4211 4212 cur_entry = SLIST_FIRST(async_head); 4213 while (cur_entry != NULL) { 4214 struct async_node *next_entry; 4215 /* 4216 * Grab the next list entry before we call the current 4217 * entry's callback. This is because the callback function 4218 * can delete its async callback entry. 4219 */ 4220 next_entry = SLIST_NEXT(cur_entry, links); 4221 if ((cur_entry->event_enable & async_code) != 0) { 4222 lock = cur_entry->event_lock; 4223 if (lock) 4224 CAM_SIM_LOCK(path->device->sim); 4225 cur_entry->callback(cur_entry->callback_arg, 4226 async_code, path, 4227 async_arg); 4228 if (lock) 4229 CAM_SIM_UNLOCK(path->device->sim); 4230 } 4231 cur_entry = next_entry; 4232 } 4233 } 4234 4235 void 4236 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4237 { 4238 union ccb *ccb; 4239 int size; 4240 4241 ccb = xpt_alloc_ccb_nowait(); 4242 if (ccb == NULL) { 4243 xpt_print(path, "Can't allocate CCB to send %s\n", 4244 xpt_async_string(async_code)); 4245 return; 4246 } 4247 4248 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4249 xpt_print(path, "Can't allocate path to send %s\n", 4250 xpt_async_string(async_code)); 4251 xpt_free_ccb(ccb); 4252 return; 4253 } 4254 ccb->ccb_h.path->periph = NULL; 4255 ccb->ccb_h.func_code = XPT_ASYNC; 4256 ccb->ccb_h.cbfcnp = xpt_async_process; 4257 ccb->ccb_h.flags |= CAM_UNLOCKED; 4258 ccb->casync.async_code = async_code; 4259 ccb->casync.async_arg_size = 0; 4260 size = xpt_async_size(async_code); 4261 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4262 ("xpt_async: func %#x %s aync_code %d %s\n", 4263 ccb->ccb_h.func_code, 4264 xpt_action_name(ccb->ccb_h.func_code), 4265 async_code, 4266 xpt_async_string(async_code))); 4267 if (size > 0 && async_arg != NULL) { 4268 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4269 if (ccb->casync.async_arg_ptr == NULL) { 4270 xpt_print(path, "Can't allocate argument to send %s\n", 4271 xpt_async_string(async_code)); 4272 xpt_free_path(ccb->ccb_h.path); 4273 xpt_free_ccb(ccb); 4274 return; 4275 } 4276 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4277 ccb->casync.async_arg_size = size; 4278 } else if (size < 0) { 4279 ccb->casync.async_arg_ptr = async_arg; 4280 ccb->casync.async_arg_size = size; 4281 } 4282 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4283 xpt_freeze_devq(path, 1); 4284 else 4285 xpt_freeze_simq(path->bus->sim, 1); 4286 xpt_done(ccb); 4287 } 4288 4289 static void 4290 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4291 struct cam_et *target, struct cam_ed *device, 4292 void *async_arg) 4293 { 4294 4295 /* 4296 * We only need to handle events for real devices. 
4297 */ 4298 if (target->target_id == CAM_TARGET_WILDCARD 4299 || device->lun_id == CAM_LUN_WILDCARD) 4300 return; 4301 4302 printf("%s called\n", __func__); 4303 } 4304 4305 static uint32_t 4306 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4307 { 4308 struct cam_devq *devq; 4309 uint32_t freeze; 4310 4311 devq = dev->sim->devq; 4312 mtx_assert(&devq->send_mtx, MA_OWNED); 4313 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4314 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4315 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4316 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4317 /* Remove frozen device from sendq. */ 4318 if (device_is_queued(dev)) 4319 camq_remove(&devq->send_queue, dev->devq_entry.index); 4320 return (freeze); 4321 } 4322 4323 u_int32_t 4324 xpt_freeze_devq(struct cam_path *path, u_int count) 4325 { 4326 struct cam_ed *dev = path->device; 4327 struct cam_devq *devq; 4328 uint32_t freeze; 4329 4330 devq = dev->sim->devq; 4331 mtx_lock(&devq->send_mtx); 4332 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4333 freeze = xpt_freeze_devq_device(dev, count); 4334 mtx_unlock(&devq->send_mtx); 4335 return (freeze); 4336 } 4337 4338 u_int32_t 4339 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4340 { 4341 struct cam_devq *devq; 4342 uint32_t freeze; 4343 4344 devq = sim->devq; 4345 mtx_lock(&devq->send_mtx); 4346 freeze = (devq->send_queue.qfrozen_cnt += count); 4347 mtx_unlock(&devq->send_mtx); 4348 return (freeze); 4349 } 4350 4351 static void 4352 xpt_release_devq_timeout(void *arg) 4353 { 4354 struct cam_ed *dev; 4355 struct cam_devq *devq; 4356 4357 dev = (struct cam_ed *)arg; 4358 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4359 devq = dev->sim->devq; 4360 mtx_assert(&devq->send_mtx, MA_OWNED); 4361 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4362 xpt_run_devq(devq); 4363 } 4364 4365 void 4366 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4367 { 4368 struct cam_ed *dev; 4369 struct cam_devq *devq; 4370 4371 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4372 count, run_queue)); 4373 dev = path->device; 4374 devq = dev->sim->devq; 4375 mtx_lock(&devq->send_mtx); 4376 if (xpt_release_devq_device(dev, count, run_queue)) 4377 xpt_run_devq(dev->sim->devq); 4378 mtx_unlock(&devq->send_mtx); 4379 } 4380 4381 static int 4382 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4383 { 4384 4385 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4386 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4387 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4388 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4389 if (count > dev->ccbq.queue.qfrozen_cnt) { 4390 #ifdef INVARIANTS 4391 printf("xpt_release_devq(): requested %u > present %u\n", 4392 count, dev->ccbq.queue.qfrozen_cnt); 4393 #endif 4394 count = dev->ccbq.queue.qfrozen_cnt; 4395 } 4396 dev->ccbq.queue.qfrozen_cnt -= count; 4397 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4398 /* 4399 * No longer need to wait for a successful 4400 * command completion. 4401 */ 4402 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4403 /* 4404 * Remove any timeouts that might be scheduled 4405 * to release this queue. 4406 */ 4407 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4408 callout_stop(&dev->callout); 4409 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4410 } 4411 /* 4412 * Now that we are unfrozen schedule the 4413 * device so any pending transactions are 4414 * run. 
4415 */ 4416 xpt_schedule_devq(dev->sim->devq, dev); 4417 } else 4418 run_queue = 0; 4419 return (run_queue); 4420 } 4421 4422 void 4423 xpt_release_simq(struct cam_sim *sim, int run_queue) 4424 { 4425 struct cam_devq *devq; 4426 4427 devq = sim->devq; 4428 mtx_lock(&devq->send_mtx); 4429 if (devq->send_queue.qfrozen_cnt <= 0) { 4430 #ifdef INVARIANTS 4431 printf("xpt_release_simq: requested 1 > present %u\n", 4432 devq->send_queue.qfrozen_cnt); 4433 #endif 4434 } else 4435 devq->send_queue.qfrozen_cnt--; 4436 if (devq->send_queue.qfrozen_cnt == 0) { 4437 /* 4438 * If there is a timeout scheduled to release this 4439 * sim queue, remove it. The queue frozen count is 4440 * already at 0. 4441 */ 4442 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4443 callout_stop(&sim->callout); 4444 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4445 } 4446 if (run_queue) { 4447 /* 4448 * Now that we are unfrozen run the send queue. 4449 */ 4450 xpt_run_devq(sim->devq); 4451 } 4452 } 4453 mtx_unlock(&devq->send_mtx); 4454 } 4455 4456 /* 4457 * XXX Appears to be unused. 4458 */ 4459 static void 4460 xpt_release_simq_timeout(void *arg) 4461 { 4462 struct cam_sim *sim; 4463 4464 sim = (struct cam_sim *)arg; 4465 xpt_release_simq(sim, /* run_queue */ TRUE); 4466 } 4467 4468 void 4469 xpt_done(union ccb *done_ccb) 4470 { 4471 struct cam_doneq *queue; 4472 int run, hash; 4473 4474 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4475 ("xpt_done: func= %#x %s status %#x\n", 4476 done_ccb->ccb_h.func_code, 4477 xpt_action_name(done_ccb->ccb_h.func_code), 4478 done_ccb->ccb_h.status)); 4479 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4480 return; 4481 4482 /* Store the time the ccb was in the sim */ 4483 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4484 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4485 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4486 queue = &cam_doneqs[hash]; 4487 mtx_lock(&queue->cam_doneq_mtx); 4488 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4489 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4490 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4491 mtx_unlock(&queue->cam_doneq_mtx); 4492 if (run) 4493 wakeup(&queue->cam_doneq); 4494 } 4495 4496 void 4497 xpt_done_direct(union ccb *done_ccb) 4498 { 4499 4500 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4501 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4502 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4503 return; 4504 4505 /* Store the time the ccb was in the sim */ 4506 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4507 xpt_done_process(&done_ccb->ccb_h); 4508 } 4509 4510 union ccb * 4511 xpt_alloc_ccb() 4512 { 4513 union ccb *new_ccb; 4514 4515 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4516 return (new_ccb); 4517 } 4518 4519 union ccb * 4520 xpt_alloc_ccb_nowait() 4521 { 4522 union ccb *new_ccb; 4523 4524 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4525 return (new_ccb); 4526 } 4527 4528 void 4529 xpt_free_ccb(union ccb *free_ccb) 4530 { 4531 free(free_ccb, M_CAMCCB); 4532 } 4533 4534 4535 4536 /* Private XPT functions */ 4537 4538 /* 4539 * Get a CAM control block for the caller. Charge the structure to the device 4540 * referenced by the path. If we don't have sufficient resources to allocate 4541 * more ccbs, we return NULL. 
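 *
 * Peripheral drivers do not call the allocators below directly; they are
 * handed a CCB in their start routine once xpt_schedule() lets the alloc
 * queue run, or they obtain one synchronously along these lines (a sketch
 * only; "fooerror", the retry/sense flags, and the devstat pointer are
 * driver-specific assumptions):
 *
 *      ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *      ... fill in the request ...
 *      cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO, SF_RETRY_UA,
 *          softc->disk->d_devstat);
 *      xpt_release_ccb(ccb);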
4542 */ 4543 static union ccb * 4544 xpt_get_ccb_nowait(struct cam_periph *periph) 4545 { 4546 union ccb *new_ccb; 4547 4548 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4549 if (new_ccb == NULL) 4550 return (NULL); 4551 periph->periph_allocated++; 4552 cam_ccbq_take_opening(&periph->path->device->ccbq); 4553 return (new_ccb); 4554 } 4555 4556 static union ccb * 4557 xpt_get_ccb(struct cam_periph *periph) 4558 { 4559 union ccb *new_ccb; 4560 4561 cam_periph_unlock(periph); 4562 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4563 cam_periph_lock(periph); 4564 periph->periph_allocated++; 4565 cam_ccbq_take_opening(&periph->path->device->ccbq); 4566 return (new_ccb); 4567 } 4568 4569 union ccb * 4570 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4571 { 4572 struct ccb_hdr *ccb_h; 4573 4574 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4575 cam_periph_assert(periph, MA_OWNED); 4576 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4577 ccb_h->pinfo.priority != priority) { 4578 if (priority < periph->immediate_priority) { 4579 periph->immediate_priority = priority; 4580 xpt_run_allocq(periph, 0); 4581 } else 4582 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4583 "cgticb", 0); 4584 } 4585 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4586 return ((union ccb *)ccb_h); 4587 } 4588 4589 static void 4590 xpt_acquire_bus(struct cam_eb *bus) 4591 { 4592 4593 xpt_lock_buses(); 4594 bus->refcount++; 4595 xpt_unlock_buses(); 4596 } 4597 4598 static void 4599 xpt_release_bus(struct cam_eb *bus) 4600 { 4601 4602 xpt_lock_buses(); 4603 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4604 if (--bus->refcount > 0) { 4605 xpt_unlock_buses(); 4606 return; 4607 } 4608 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4609 xsoftc.bus_generation++; 4610 xpt_unlock_buses(); 4611 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4612 ("destroying bus, but target list is not empty")); 4613 cam_sim_release(bus->sim); 4614 mtx_destroy(&bus->eb_mtx); 4615 free(bus, M_CAMXPT); 4616 } 4617 4618 static struct cam_et * 4619 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4620 { 4621 struct cam_et *cur_target, *target; 4622 4623 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4624 mtx_assert(&bus->eb_mtx, MA_OWNED); 4625 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4626 M_NOWAIT|M_ZERO); 4627 if (target == NULL) 4628 return (NULL); 4629 4630 TAILQ_INIT(&target->ed_entries); 4631 target->bus = bus; 4632 target->target_id = target_id; 4633 target->refcount = 1; 4634 target->generation = 0; 4635 target->luns = NULL; 4636 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4637 timevalclear(&target->last_reset); 4638 /* 4639 * Hold a reference to our parent bus so it 4640 * will not go away before we do. 
4641 */ 4642 bus->refcount++; 4643 4644 /* Insertion sort into our bus's target list */ 4645 cur_target = TAILQ_FIRST(&bus->et_entries); 4646 while (cur_target != NULL && cur_target->target_id < target_id) 4647 cur_target = TAILQ_NEXT(cur_target, links); 4648 if (cur_target != NULL) { 4649 TAILQ_INSERT_BEFORE(cur_target, target, links); 4650 } else { 4651 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4652 } 4653 bus->generation++; 4654 return (target); 4655 } 4656 4657 static void 4658 xpt_acquire_target(struct cam_et *target) 4659 { 4660 struct cam_eb *bus = target->bus; 4661 4662 mtx_lock(&bus->eb_mtx); 4663 target->refcount++; 4664 mtx_unlock(&bus->eb_mtx); 4665 } 4666 4667 static void 4668 xpt_release_target(struct cam_et *target) 4669 { 4670 struct cam_eb *bus = target->bus; 4671 4672 mtx_lock(&bus->eb_mtx); 4673 if (--target->refcount > 0) { 4674 mtx_unlock(&bus->eb_mtx); 4675 return; 4676 } 4677 TAILQ_REMOVE(&bus->et_entries, target, links); 4678 bus->generation++; 4679 mtx_unlock(&bus->eb_mtx); 4680 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4681 ("destroying target, but device list is not empty")); 4682 xpt_release_bus(bus); 4683 mtx_destroy(&target->luns_mtx); 4684 if (target->luns) 4685 free(target->luns, M_CAMXPT); 4686 free(target, M_CAMXPT); 4687 } 4688 4689 static struct cam_ed * 4690 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4691 lun_id_t lun_id) 4692 { 4693 struct cam_ed *device; 4694 4695 device = xpt_alloc_device(bus, target, lun_id); 4696 if (device == NULL) 4697 return (NULL); 4698 4699 device->mintags = 1; 4700 device->maxtags = 1; 4701 return (device); 4702 } 4703 4704 static void 4705 xpt_destroy_device(void *context, int pending) 4706 { 4707 struct cam_ed *device = context; 4708 4709 mtx_lock(&device->device_mtx); 4710 mtx_destroy(&device->device_mtx); 4711 free(device, M_CAMDEV); 4712 } 4713 4714 struct cam_ed * 4715 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4716 { 4717 struct cam_ed *cur_device, *device; 4718 struct cam_devq *devq; 4719 cam_status status; 4720 4721 mtx_assert(&bus->eb_mtx, MA_OWNED); 4722 /* Make space for us in the device queue on our bus */ 4723 devq = bus->sim->devq; 4724 mtx_lock(&devq->send_mtx); 4725 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4726 mtx_unlock(&devq->send_mtx); 4727 if (status != CAM_REQ_CMP) 4728 return (NULL); 4729 4730 device = (struct cam_ed *)malloc(sizeof(*device), 4731 M_CAMDEV, M_NOWAIT|M_ZERO); 4732 if (device == NULL) 4733 return (NULL); 4734 4735 cam_init_pinfo(&device->devq_entry); 4736 device->target = target; 4737 device->lun_id = lun_id; 4738 device->sim = bus->sim; 4739 if (cam_ccbq_init(&device->ccbq, 4740 bus->sim->max_dev_openings) != 0) { 4741 free(device, M_CAMDEV); 4742 return (NULL); 4743 } 4744 SLIST_INIT(&device->asyncs); 4745 SLIST_INIT(&device->periphs); 4746 device->generation = 0; 4747 device->flags = CAM_DEV_UNCONFIGURED; 4748 device->tag_delay_count = 0; 4749 device->tag_saved_openings = 0; 4750 device->refcount = 1; 4751 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4752 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4753 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4754 /* 4755 * Hold a reference to our parent bus so it 4756 * will not go away before we do. 
4757 */ 4758 target->refcount++; 4759 4760 cur_device = TAILQ_FIRST(&target->ed_entries); 4761 while (cur_device != NULL && cur_device->lun_id < lun_id) 4762 cur_device = TAILQ_NEXT(cur_device, links); 4763 if (cur_device != NULL) 4764 TAILQ_INSERT_BEFORE(cur_device, device, links); 4765 else 4766 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4767 target->generation++; 4768 return (device); 4769 } 4770 4771 void 4772 xpt_acquire_device(struct cam_ed *device) 4773 { 4774 struct cam_eb *bus = device->target->bus; 4775 4776 mtx_lock(&bus->eb_mtx); 4777 device->refcount++; 4778 mtx_unlock(&bus->eb_mtx); 4779 } 4780 4781 void 4782 xpt_release_device(struct cam_ed *device) 4783 { 4784 struct cam_eb *bus = device->target->bus; 4785 struct cam_devq *devq; 4786 4787 mtx_lock(&bus->eb_mtx); 4788 if (--device->refcount > 0) { 4789 mtx_unlock(&bus->eb_mtx); 4790 return; 4791 } 4792 4793 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4794 device->target->generation++; 4795 mtx_unlock(&bus->eb_mtx); 4796 4797 /* Release our slot in the devq */ 4798 devq = bus->sim->devq; 4799 mtx_lock(&devq->send_mtx); 4800 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4801 mtx_unlock(&devq->send_mtx); 4802 4803 KASSERT(SLIST_EMPTY(&device->periphs), 4804 ("destroying device, but periphs list is not empty")); 4805 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4806 ("destroying device while still queued for ccbs")); 4807 4808 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4809 callout_stop(&device->callout); 4810 4811 xpt_release_target(device->target); 4812 4813 cam_ccbq_fini(&device->ccbq); 4814 /* 4815 * Free allocated memory. free(9) does nothing if the 4816 * supplied pointer is NULL, so it is safe to call without 4817 * checking. 
4818 */ 4819 free(device->supported_vpds, M_CAMXPT); 4820 free(device->device_id, M_CAMXPT); 4821 free(device->ext_inq, M_CAMXPT); 4822 free(device->physpath, M_CAMXPT); 4823 free(device->rcap_buf, M_CAMXPT); 4824 free(device->serial_num, M_CAMXPT); 4825 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4826 } 4827 4828 u_int32_t 4829 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4830 { 4831 int result; 4832 struct cam_ed *dev; 4833 4834 dev = path->device; 4835 mtx_lock(&dev->sim->devq->send_mtx); 4836 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4837 mtx_unlock(&dev->sim->devq->send_mtx); 4838 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4839 || (dev->inq_flags & SID_CmdQue) != 0) 4840 dev->tag_saved_openings = newopenings; 4841 return (result); 4842 } 4843 4844 static struct cam_eb * 4845 xpt_find_bus(path_id_t path_id) 4846 { 4847 struct cam_eb *bus; 4848 4849 xpt_lock_buses(); 4850 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4851 bus != NULL; 4852 bus = TAILQ_NEXT(bus, links)) { 4853 if (bus->path_id == path_id) { 4854 bus->refcount++; 4855 break; 4856 } 4857 } 4858 xpt_unlock_buses(); 4859 return (bus); 4860 } 4861 4862 static struct cam_et * 4863 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4864 { 4865 struct cam_et *target; 4866 4867 mtx_assert(&bus->eb_mtx, MA_OWNED); 4868 for (target = TAILQ_FIRST(&bus->et_entries); 4869 target != NULL; 4870 target = TAILQ_NEXT(target, links)) { 4871 if (target->target_id == target_id) { 4872 target->refcount++; 4873 break; 4874 } 4875 } 4876 return (target); 4877 } 4878 4879 static struct cam_ed * 4880 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4881 { 4882 struct cam_ed *device; 4883 4884 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4885 for (device = TAILQ_FIRST(&target->ed_entries); 4886 device != NULL; 4887 device = TAILQ_NEXT(device, links)) { 4888 if (device->lun_id == lun_id) { 4889 device->refcount++; 4890 break; 4891 } 4892 } 4893 return (device); 4894 } 4895 4896 void 4897 xpt_start_tags(struct cam_path *path) 4898 { 4899 struct ccb_relsim crs; 4900 struct cam_ed *device; 4901 struct cam_sim *sim; 4902 int newopenings; 4903 4904 device = path->device; 4905 sim = path->bus->sim; 4906 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4907 xpt_freeze_devq(path, /*count*/1); 4908 device->inq_flags |= SID_CmdQue; 4909 if (device->tag_saved_openings != 0) 4910 newopenings = device->tag_saved_openings; 4911 else 4912 newopenings = min(device->maxtags, 4913 sim->max_tagged_dev_openings); 4914 xpt_dev_ccbq_resize(path, newopenings); 4915 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4916 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4917 crs.ccb_h.func_code = XPT_REL_SIMQ; 4918 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4919 crs.openings 4920 = crs.release_timeout 4921 = crs.qfrozen_cnt 4922 = 0; 4923 xpt_action((union ccb *)&crs); 4924 } 4925 4926 void 4927 xpt_stop_tags(struct cam_path *path) 4928 { 4929 struct ccb_relsim crs; 4930 struct cam_ed *device; 4931 struct cam_sim *sim; 4932 4933 device = path->device; 4934 sim = path->bus->sim; 4935 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4936 device->tag_delay_count = 0; 4937 xpt_freeze_devq(path, /*count*/1); 4938 device->inq_flags &= ~SID_CmdQue; 4939 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4940 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4941 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4942 crs.ccb_h.func_code = XPT_REL_SIMQ; 4943 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4944 crs.openings 4945 = 
crs.release_timeout 4946 = crs.qfrozen_cnt 4947 = 0; 4948 xpt_action((union ccb *)&crs); 4949 } 4950 4951 static void 4952 xpt_boot_delay(void *arg) 4953 { 4954 4955 xpt_release_boot(); 4956 } 4957 4958 static void 4959 xpt_config(void *arg) 4960 { 4961 /* 4962 * Now that interrupts are enabled, go find our devices 4963 */ 4964 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 4965 printf("xpt_config: failed to create taskqueue thread.\n"); 4966 4967 /* Setup debugging path */ 4968 if (cam_dflags != CAM_DEBUG_NONE) { 4969 if (xpt_create_path(&cam_dpath, NULL, 4970 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 4971 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 4972 printf("xpt_config: xpt_create_path() failed for debug" 4973 " target %d:%d:%d, debugging disabled\n", 4974 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 4975 cam_dflags = CAM_DEBUG_NONE; 4976 } 4977 } else 4978 cam_dpath = NULL; 4979 4980 periphdriver_init(1); 4981 xpt_hold_boot(); 4982 callout_init(&xsoftc.boot_callout, 1); 4983 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 4984 xpt_boot_delay, NULL, 0); 4985 /* Fire up rescan thread. */ 4986 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 4987 "cam", "scanner")) { 4988 printf("xpt_config: failed to create rescan thread.\n"); 4989 } 4990 } 4991 4992 void 4993 xpt_hold_boot(void) 4994 { 4995 xpt_lock_buses(); 4996 xsoftc.buses_to_config++; 4997 xpt_unlock_buses(); 4998 } 4999 5000 void 5001 xpt_release_boot(void) 5002 { 5003 xpt_lock_buses(); 5004 xsoftc.buses_to_config--; 5005 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 5006 struct xpt_task *task; 5007 5008 xsoftc.buses_config_done = 1; 5009 xpt_unlock_buses(); 5010 /* Call manually because we don't have any busses */ 5011 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5012 if (task != NULL) { 5013 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5014 taskqueue_enqueue(taskqueue_thread, &task->task); 5015 } 5016 } else 5017 xpt_unlock_buses(); 5018 } 5019 5020 /* 5021 * If the given device only has one peripheral attached to it, and if that 5022 * peripheral is the passthrough driver, announce it. This insures that the 5023 * user sees some sort of announcement for every peripheral in their system. 5024 */ 5025 static int 5026 xptpassannouncefunc(struct cam_ed *device, void *arg) 5027 { 5028 struct cam_periph *periph; 5029 int i; 5030 5031 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5032 periph = SLIST_NEXT(periph, periph_links), i++); 5033 5034 periph = SLIST_FIRST(&device->periphs); 5035 if ((i == 1) 5036 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5037 xpt_announce_periph(periph, NULL); 5038 5039 return(1); 5040 } 5041 5042 static void 5043 xpt_finishconfig_task(void *context, int pending) 5044 { 5045 5046 periphdriver_init(2); 5047 /* 5048 * Check for devices with no "standard" peripheral driver 5049 * attached. For any devices like that, announce the 5050 * passthrough driver so the user will see something. 5051 */ 5052 if (!bootverbose) 5053 xpt_for_all_devices(xptpassannouncefunc, NULL); 5054 5055 /* Release our hook so that the boot can continue. 
*/ 5056 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5057 free(xsoftc.xpt_config_hook, M_CAMXPT); 5058 xsoftc.xpt_config_hook = NULL; 5059 5060 free(context, M_CAMXPT); 5061 } 5062 5063 cam_status 5064 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5065 struct cam_path *path) 5066 { 5067 struct ccb_setasync csa; 5068 cam_status status; 5069 int xptpath = 0; 5070 5071 if (path == NULL) { 5072 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5073 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5074 if (status != CAM_REQ_CMP) 5075 return (status); 5076 xpt_path_lock(path); 5077 xptpath = 1; 5078 } 5079 5080 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5081 csa.ccb_h.func_code = XPT_SASYNC_CB; 5082 csa.event_enable = event; 5083 csa.callback = cbfunc; 5084 csa.callback_arg = cbarg; 5085 xpt_action((union ccb *)&csa); 5086 status = csa.ccb_h.status; 5087 5088 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5089 ("xpt_register_async: func %p\n", cbfunc)); 5090 5091 if (xptpath) { 5092 xpt_path_unlock(path); 5093 xpt_free_path(path); 5094 } 5095 5096 if ((status == CAM_REQ_CMP) && 5097 (csa.event_enable & AC_FOUND_DEVICE)) { 5098 /* 5099 * Get this peripheral up to date with all 5100 * the currently existing devices. 5101 */ 5102 xpt_for_all_devices(xptsetasyncfunc, &csa); 5103 } 5104 if ((status == CAM_REQ_CMP) && 5105 (csa.event_enable & AC_PATH_REGISTERED)) { 5106 /* 5107 * Get this peripheral up to date with all 5108 * the currently existing busses. 5109 */ 5110 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5111 } 5112 5113 return (status); 5114 } 5115 5116 static void 5117 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5118 { 5119 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5120 5121 switch (work_ccb->ccb_h.func_code) { 5122 /* Common cases first */ 5123 case XPT_PATH_INQ: /* Path routing inquiry */ 5124 { 5125 struct ccb_pathinq *cpi; 5126 5127 cpi = &work_ccb->cpi; 5128 cpi->version_num = 1; /* XXX??? */ 5129 cpi->hba_inquiry = 0; 5130 cpi->target_sprt = 0; 5131 cpi->hba_misc = 0; 5132 cpi->hba_eng_cnt = 0; 5133 cpi->max_target = 0; 5134 cpi->max_lun = 0; 5135 cpi->initiator_id = 0; 5136 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5137 strncpy(cpi->hba_vid, "", HBA_IDLEN); 5138 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5139 cpi->unit_number = sim->unit_number; 5140 cpi->bus_id = sim->bus_id; 5141 cpi->base_transfer_speed = 0; 5142 cpi->protocol = PROTO_UNSPECIFIED; 5143 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5144 cpi->transport = XPORT_UNSPECIFIED; 5145 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5146 cpi->ccb_h.status = CAM_REQ_CMP; 5147 xpt_done(work_ccb); 5148 break; 5149 } 5150 default: 5151 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5152 xpt_done(work_ccb); 5153 break; 5154 } 5155 } 5156 5157 /* 5158 * The xpt as a "controller" has no interrupt sources, so polling 5159 * is a no-op. 
5160 */ 5161 static void 5162 xptpoll(struct cam_sim *sim) 5163 { 5164 } 5165 5166 void 5167 xpt_lock_buses(void) 5168 { 5169 mtx_lock(&xsoftc.xpt_topo_lock); 5170 } 5171 5172 void 5173 xpt_unlock_buses(void) 5174 { 5175 mtx_unlock(&xsoftc.xpt_topo_lock); 5176 } 5177 5178 struct mtx * 5179 xpt_path_mtx(struct cam_path *path) 5180 { 5181 5182 return (&path->device->device_mtx); 5183 } 5184 5185 static void 5186 xpt_done_process(struct ccb_hdr *ccb_h) 5187 { 5188 struct cam_sim *sim; 5189 struct cam_devq *devq; 5190 struct mtx *mtx = NULL; 5191 5192 if (ccb_h->flags & CAM_HIGH_POWER) { 5193 struct highpowerlist *hphead; 5194 struct cam_ed *device; 5195 5196 mtx_lock(&xsoftc.xpt_highpower_lock); 5197 hphead = &xsoftc.highpowerq; 5198 5199 device = STAILQ_FIRST(hphead); 5200 5201 /* 5202 * Increment the count since this command is done. 5203 */ 5204 xsoftc.num_highpower++; 5205 5206 /* 5207 * Any high powered commands queued up? 5208 */ 5209 if (device != NULL) { 5210 5211 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5212 mtx_unlock(&xsoftc.xpt_highpower_lock); 5213 5214 mtx_lock(&device->sim->devq->send_mtx); 5215 xpt_release_devq_device(device, 5216 /*count*/1, /*runqueue*/TRUE); 5217 mtx_unlock(&device->sim->devq->send_mtx); 5218 } else 5219 mtx_unlock(&xsoftc.xpt_highpower_lock); 5220 } 5221 5222 sim = ccb_h->path->bus->sim; 5223 5224 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5225 xpt_release_simq(sim, /*run_queue*/FALSE); 5226 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5227 } 5228 5229 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5230 && (ccb_h->status & CAM_DEV_QFRZN)) { 5231 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5232 ccb_h->status &= ~CAM_DEV_QFRZN; 5233 } 5234 5235 devq = sim->devq; 5236 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5237 struct cam_ed *dev = ccb_h->path->device; 5238 5239 mtx_lock(&devq->send_mtx); 5240 devq->send_active--; 5241 devq->send_openings++; 5242 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5243 5244 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5245 && (dev->ccbq.dev_active == 0))) { 5246 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5247 xpt_release_devq_device(dev, /*count*/1, 5248 /*run_queue*/FALSE); 5249 } 5250 5251 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5252 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5253 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5254 xpt_release_devq_device(dev, /*count*/1, 5255 /*run_queue*/FALSE); 5256 } 5257 5258 if (!device_is_queued(dev)) 5259 (void)xpt_schedule_devq(devq, dev); 5260 xpt_run_devq(devq); 5261 mtx_unlock(&devq->send_mtx); 5262 5263 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5264 mtx = xpt_path_mtx(ccb_h->path); 5265 mtx_lock(mtx); 5266 5267 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5268 && (--dev->tag_delay_count == 0)) 5269 xpt_start_tags(ccb_h->path); 5270 } 5271 } 5272 5273 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5274 if (mtx == NULL) { 5275 mtx = xpt_path_mtx(ccb_h->path); 5276 mtx_lock(mtx); 5277 } 5278 } else { 5279 if (mtx != NULL) { 5280 mtx_unlock(mtx); 5281 mtx = NULL; 5282 } 5283 } 5284 5285 /* Call the peripheral driver's callback */ 5286 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5287 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5288 if (mtx != NULL) 5289 mtx_unlock(mtx); 5290 } 5291 5292 void 5293 xpt_done_td(void *arg) 5294 { 5295 struct cam_doneq *queue = arg; 5296 struct ccb_hdr *ccb_h; 5297 STAILQ_HEAD(, ccb_hdr) doneq; 5298 5299 STAILQ_INIT(&doneq); 5300 mtx_lock(&queue->cam_doneq_mtx); 5301 while (1) { 5302 while 
(STAILQ_EMPTY(&queue->cam_doneq)) { 5303 queue->cam_doneq_sleep = 1; 5304 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5305 PRIBIO, "-", 0); 5306 queue->cam_doneq_sleep = 0; 5307 } 5308 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5309 mtx_unlock(&queue->cam_doneq_mtx); 5310 5311 THREAD_NO_SLEEPING(); 5312 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5313 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5314 xpt_done_process(ccb_h); 5315 } 5316 THREAD_SLEEPING_OK(); 5317 5318 mtx_lock(&queue->cam_doneq_mtx); 5319 } 5320 } 5321 5322 static void 5323 camisr_runqueue(void) 5324 { 5325 struct ccb_hdr *ccb_h; 5326 struct cam_doneq *queue; 5327 int i; 5328 5329 /* Process global queues. */ 5330 for (i = 0; i < cam_num_doneqs; i++) { 5331 queue = &cam_doneqs[i]; 5332 mtx_lock(&queue->cam_doneq_mtx); 5333 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5334 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5335 mtx_unlock(&queue->cam_doneq_mtx); 5336 xpt_done_process(ccb_h); 5337 mtx_lock(&queue->cam_doneq_mtx); 5338 } 5339 mtx_unlock(&queue->cam_doneq_mtx); 5340 } 5341 } 5342 5343 struct kv 5344 { 5345 uint32_t v; 5346 const char *name; 5347 }; 5348 5349 static struct kv map[] = { 5350 { XPT_NOOP, "XPT_NOOP" }, 5351 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5352 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5353 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5354 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5355 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5356 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5357 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5358 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5359 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5360 { XPT_DEBUG, "XPT_DEBUG" }, 5361 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5362 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5363 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5364 { XPT_ASYNC, "XPT_ASYNC" }, 5365 { XPT_ABORT, "XPT_ABORT" }, 5366 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5367 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5368 { XPT_TERM_IO, "XPT_TERM_IO" }, 5369 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5370 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5371 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5372 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5373 { XPT_ATA_IO, "XPT_ATA_IO" }, 5374 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5375 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5376 { XPT_NVME_IO, "XPT_NVME_IO" }, 5377 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5378 { XPT_SMP_IO, "XPT_SMP_IO" }, 5379 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5380 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5381 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5382 { XPT_EN_LUN, "XPT_EN_LUN" }, 5383 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5384 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5385 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5386 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5387 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5388 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5389 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5390 { 0, 0 } 5391 }; 5392 5393 static const char * 5394 xpt_action_name(uint32_t action) 5395 { 5396 static char buffer[32]; /* Only for unknown messages -- racy */ 5397 struct kv *walker = map; 5398 5399 while (walker->name != NULL) { 5400 if (walker->v == action) 5401 return (walker->name); 5402 walker++; 5403 } 5404 5405 snprintf(buffer, sizeof(buffer), "%#x", action); 5406 return (buffer); 5407 } 5408