1 /*- 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/bio.h> 35 #include <sys/bus.h> 36 #include <sys/systm.h> 37 #include <sys/types.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/time.h> 41 #include <sys/conf.h> 42 #include <sys/fcntl.h> 43 #include <sys/interrupt.h> 44 #include <sys/proc.h> 45 #include <sys/sbuf.h> 46 #include <sys/smp.h> 47 #include <sys/taskqueue.h> 48 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/sysctl.h> 52 #include <sys/kthread.h> 53 54 #include <cam/cam.h> 55 #include <cam/cam_ccb.h> 56 #include <cam/cam_periph.h> 57 #include <cam/cam_queue.h> 58 #include <cam/cam_sim.h> 59 #include <cam/cam_xpt.h> 60 #include <cam/cam_xpt_sim.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_xpt_internal.h> 63 #include <cam/cam_debug.h> 64 #include <cam/cam_compat.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 #include <cam/scsi/scsi_pass.h> 69 70 #include <machine/md_var.h> /* geometry translation */ 71 #include <machine/stdarg.h> /* for xpt_print below */ 72 73 #include "opt_cam.h" 74 75 /* 76 * This is the maximum number of high powered commands (e.g. start unit) 77 * that can be outstanding at a particular time. 78 */ 79 #ifndef CAM_MAX_HIGHPOWER 80 #define CAM_MAX_HIGHPOWER 4 81 #endif 82 83 /* Datastructures internal to the xpt layer */ 84 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); 85 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); 86 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); 87 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); 88 89 /* Object for defering XPT actions to a taskqueue */ 90 struct xpt_task { 91 struct task task; 92 void *data1; 93 uintptr_t data2; 94 }; 95 96 struct xpt_softc { 97 uint32_t xpt_generation; 98 99 /* number of high powered commands that can go through right now */ 100 struct mtx xpt_highpower_lock; 101 STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; 102 int num_highpower; 103 104 /* queue for handling async rescan requests. */ 105 TAILQ_HEAD(, ccb_hdr) ccb_scanq; 106 int buses_to_config; 107 int buses_config_done; 108 109 /* Registered busses */ 110 TAILQ_HEAD(,cam_eb) xpt_busses; 111 u_int bus_generation; 112 113 struct intr_config_hook *xpt_config_hook; 114 115 int boot_delay; 116 struct callout boot_callout; 117 118 struct mtx xpt_topo_lock; 119 struct mtx xpt_lock; 120 struct taskqueue *xpt_taskq; 121 }; 122 123 typedef enum { 124 DM_RET_COPY = 0x01, 125 DM_RET_FLAG_MASK = 0x0f, 126 DM_RET_NONE = 0x00, 127 DM_RET_STOP = 0x10, 128 DM_RET_DESCEND = 0x20, 129 DM_RET_ERROR = 0x30, 130 DM_RET_ACTION_MASK = 0xf0 131 } dev_match_ret; 132 133 typedef enum { 134 XPT_DEPTH_BUS, 135 XPT_DEPTH_TARGET, 136 XPT_DEPTH_DEVICE, 137 XPT_DEPTH_PERIPH 138 } xpt_traverse_depth; 139 140 struct xpt_traverse_config { 141 xpt_traverse_depth depth; 142 void *tr_func; 143 void *tr_arg; 144 }; 145 146 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); 147 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); 148 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); 149 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); 150 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); 151 152 /* Transport layer configuration information */ 153 static struct xpt_softc xsoftc; 154 155 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); 156 157 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, 158 &xsoftc.boot_delay, 0, "Bus registration wait time"); 159 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, 160 &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); 161 162 struct cam_doneq { 163 struct mtx_padalign cam_doneq_mtx; 164 STAILQ_HEAD(, ccb_hdr) cam_doneq; 165 int cam_doneq_sleep; 166 }; 167 168 static struct cam_doneq cam_doneqs[MAXCPU]; 169 static int cam_num_doneqs; 170 static struct proc *cam_proc; 171 172 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, 173 &cam_num_doneqs, 0, "Number of completion queues/threads"); 174 175 struct cam_periph *xpt_periph; 176 177 static periph_init_t xpt_periph_init; 178 179 static struct periph_driver xpt_driver = 180 { 181 xpt_periph_init, "xpt", 182 TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, 183 CAM_PERIPH_DRV_EARLY 184 }; 185 186 PERIPHDRIVER_DECLARE(xpt, xpt_driver); 187 188 static d_open_t xptopen; 189 static d_close_t xptclose; 190 static d_ioctl_t xptioctl; 191 static d_ioctl_t xptdoioctl; 192 193 static struct cdevsw xpt_cdevsw = { 194 .d_version = D_VERSION, 195 .d_flags = 0, 196 .d_open = xptopen, 197 .d_close = xptclose, 198 .d_ioctl = xptioctl, 199 .d_name = "xpt", 200 }; 201 202 /* Storage for debugging datastructures */ 203 struct cam_path *cam_dpath; 204 u_int32_t cam_dflags = CAM_DEBUG_FLAGS; 205 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, 206 &cam_dflags, 0, "Enabled debug flags"); 207 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; 208 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, 209 &cam_debug_delay, 0, "Delay in us after each debug message"); 210 211 /* Our boot-time initialization hook */ 212 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); 213 214 static moduledata_t cam_moduledata = { 215 "cam", 216 cam_module_event_handler, 217 NULL 218 }; 219 220 static int xpt_init(void *); 221 222 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 223 MODULE_VERSION(cam, 1); 224 225 226 static void xpt_async_bcast(struct async_list *async_head, 227 u_int32_t async_code, 228 struct cam_path *path, 229 void *async_arg); 230 static path_id_t xptnextfreepathid(void); 231 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); 232 static union ccb *xpt_get_ccb(struct cam_periph *periph); 233 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); 234 static void xpt_run_allocq(struct cam_periph *periph, int sleep); 235 static void xpt_run_allocq_task(void *context, int pending); 236 static void xpt_run_devq(struct cam_devq *devq); 237 static timeout_t xpt_release_devq_timeout; 238 static void xpt_release_simq_timeout(void *arg) __unused; 239 static void xpt_acquire_bus(struct cam_eb *bus); 240 static void xpt_release_bus(struct cam_eb *bus); 241 static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); 242 static int xpt_release_devq_device(struct cam_ed *dev, u_int count, 243 int run_queue); 244 static struct cam_et* 245 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); 246 static void xpt_acquire_target(struct cam_et *target); 247 static void xpt_release_target(struct cam_et *target); 248 static struct cam_eb* 249 xpt_find_bus(path_id_t path_id); 250 static struct cam_et* 251 xpt_find_target(struct cam_eb *bus, target_id_t target_id); 252 static struct cam_ed* 253 xpt_find_device(struct cam_et *target, lun_id_t lun_id); 254 static void xpt_config(void *arg); 255 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, 256 u_int32_t new_priority); 257 static xpt_devicefunc_t xptpassannouncefunc; 258 static void xptaction(struct cam_sim *sim, union ccb *work_ccb); 259 static void xptpoll(struct cam_sim *sim); 260 static void camisr_runqueue(void); 261 static void xpt_done_process(struct ccb_hdr *ccb_h); 262 static void xpt_done_td(void *); 263 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, 264 u_int num_patterns, struct cam_eb *bus); 265 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, 266 u_int num_patterns, 267 struct cam_ed *device); 268 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, 269 u_int num_patterns, 270 struct cam_periph *periph); 271 static xpt_busfunc_t xptedtbusfunc; 272 static xpt_targetfunc_t xptedttargetfunc; 273 static xpt_devicefunc_t xptedtdevicefunc; 274 static xpt_periphfunc_t xptedtperiphfunc; 275 static xpt_pdrvfunc_t xptplistpdrvfunc; 276 static xpt_periphfunc_t xptplistperiphfunc; 277 static int xptedtmatch(struct ccb_dev_match *cdm); 278 static int xptperiphlistmatch(struct ccb_dev_match *cdm); 279 static int xptbustraverse(struct cam_eb *start_bus, 280 xpt_busfunc_t *tr_func, void *arg); 281 static int xpttargettraverse(struct cam_eb *bus, 282 struct cam_et *start_target, 283 xpt_targetfunc_t *tr_func, void *arg); 284 static int xptdevicetraverse(struct cam_et *target, 285 struct cam_ed *start_device, 286 xpt_devicefunc_t *tr_func, void *arg); 287 static int xptperiphtraverse(struct cam_ed *device, 288 struct cam_periph *start_periph, 289 xpt_periphfunc_t *tr_func, void *arg); 290 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 291 xpt_pdrvfunc_t *tr_func, void *arg); 292 static int xptpdperiphtraverse(struct periph_driver **pdrv, 293 struct cam_periph *start_periph, 294 xpt_periphfunc_t *tr_func, 295 void *arg); 296 static xpt_busfunc_t xptdefbusfunc; 297 static xpt_targetfunc_t xptdeftargetfunc; 298 static xpt_devicefunc_t xptdefdevicefunc; 299 static xpt_periphfunc_t xptdefperiphfunc; 300 static void xpt_finishconfig_task(void *context, int pending); 301 static void xpt_dev_async_default(u_int32_t async_code, 302 struct cam_eb *bus, 303 struct cam_et *target, 304 struct cam_ed *device, 305 void *async_arg); 306 static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, 307 struct cam_et *target, 308 lun_id_t lun_id); 309 static xpt_devicefunc_t xptsetasyncfunc; 310 static xpt_busfunc_t xptsetasyncbusfunc; 311 static cam_status xptregister(struct cam_periph *periph, 312 void *arg); 313 static const char * xpt_action_name(uint32_t action); 314 static __inline int device_is_queued(struct cam_ed *device); 315 316 static __inline int 317 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) 318 { 319 int retval; 320 321 mtx_assert(&devq->send_mtx, MA_OWNED); 322 if ((dev->ccbq.queue.entries > 0) && 323 (dev->ccbq.dev_openings > 0) && 324 (dev->ccbq.queue.qfrozen_cnt == 0)) { 325 /* 326 * The priority of a device waiting for controller 327 * resources is that of the highest priority CCB 328 * enqueued. 329 */ 330 retval = 331 xpt_schedule_dev(&devq->send_queue, 332 &dev->devq_entry, 333 CAMQ_GET_PRIO(&dev->ccbq.queue)); 334 } else { 335 retval = 0; 336 } 337 return (retval); 338 } 339 340 static __inline int 341 device_is_queued(struct cam_ed *device) 342 { 343 return (device->devq_entry.index != CAM_UNQUEUED_INDEX); 344 } 345 346 static void 347 xpt_periph_init() 348 { 349 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 350 } 351 352 static int 353 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 354 { 355 356 /* 357 * Only allow read-write access. 358 */ 359 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 360 return(EPERM); 361 362 /* 363 * We don't allow nonblocking access. 364 */ 365 if ((flags & O_NONBLOCK) != 0) { 366 printf("%s: can't do nonblocking access\n", devtoname(dev)); 367 return(ENODEV); 368 } 369 370 return(0); 371 } 372 373 static int 374 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 375 { 376 377 return(0); 378 } 379 380 /* 381 * Don't automatically grab the xpt softc lock here even though this is going 382 * through the xpt device. The xpt device is really just a back door for 383 * accessing other devices and SIMs, so the right thing to do is to grab 384 * the appropriate SIM lock once the bus/SIM is located. 385 */ 386 static int 387 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 388 { 389 int error; 390 391 if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { 392 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); 393 } 394 return (error); 395 } 396 397 static int 398 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 399 { 400 int error; 401 402 error = 0; 403 404 switch(cmd) { 405 /* 406 * For the transport layer CAMIOCOMMAND ioctl, we really only want 407 * to accept CCB types that don't quite make sense to send through a 408 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 409 * in the CAM spec. 410 */ 411 case CAMIOCOMMAND: { 412 union ccb *ccb; 413 union ccb *inccb; 414 struct cam_eb *bus; 415 416 inccb = (union ccb *)addr; 417 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 418 if (inccb->ccb_h.func_code == XPT_SCSI_IO) 419 inccb->csio.bio = NULL; 420 #endif 421 422 bus = xpt_find_bus(inccb->ccb_h.path_id); 423 if (bus == NULL) 424 return (EINVAL); 425 426 switch (inccb->ccb_h.func_code) { 427 case XPT_SCAN_BUS: 428 case XPT_RESET_BUS: 429 if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || 430 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 431 xpt_release_bus(bus); 432 return (EINVAL); 433 } 434 break; 435 case XPT_SCAN_TGT: 436 if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || 437 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 438 xpt_release_bus(bus); 439 return (EINVAL); 440 } 441 break; 442 default: 443 break; 444 } 445 446 switch(inccb->ccb_h.func_code) { 447 case XPT_SCAN_BUS: 448 case XPT_RESET_BUS: 449 case XPT_PATH_INQ: 450 case XPT_ENG_INQ: 451 case XPT_SCAN_LUN: 452 case XPT_SCAN_TGT: 453 454 ccb = xpt_alloc_ccb(); 455 456 /* 457 * Create a path using the bus, target, and lun the 458 * user passed in. 459 */ 460 if (xpt_create_path(&ccb->ccb_h.path, NULL, 461 inccb->ccb_h.path_id, 462 inccb->ccb_h.target_id, 463 inccb->ccb_h.target_lun) != 464 CAM_REQ_CMP){ 465 error = EINVAL; 466 xpt_free_ccb(ccb); 467 break; 468 } 469 /* Ensure all of our fields are correct */ 470 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 471 inccb->ccb_h.pinfo.priority); 472 xpt_merge_ccb(ccb, inccb); 473 xpt_path_lock(ccb->ccb_h.path); 474 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 475 xpt_path_unlock(ccb->ccb_h.path); 476 bcopy(ccb, inccb, sizeof(union ccb)); 477 xpt_free_path(ccb->ccb_h.path); 478 xpt_free_ccb(ccb); 479 break; 480 481 case XPT_DEBUG: { 482 union ccb ccb; 483 484 /* 485 * This is an immediate CCB, so it's okay to 486 * allocate it on the stack. 487 */ 488 489 /* 490 * Create a path using the bus, target, and lun the 491 * user passed in. 492 */ 493 if (xpt_create_path(&ccb.ccb_h.path, NULL, 494 inccb->ccb_h.path_id, 495 inccb->ccb_h.target_id, 496 inccb->ccb_h.target_lun) != 497 CAM_REQ_CMP){ 498 error = EINVAL; 499 break; 500 } 501 /* Ensure all of our fields are correct */ 502 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 503 inccb->ccb_h.pinfo.priority); 504 xpt_merge_ccb(&ccb, inccb); 505 xpt_action(&ccb); 506 bcopy(&ccb, inccb, sizeof(union ccb)); 507 xpt_free_path(ccb.ccb_h.path); 508 break; 509 510 } 511 case XPT_DEV_MATCH: { 512 struct cam_periph_map_info mapinfo; 513 struct cam_path *old_path; 514 515 /* 516 * We can't deal with physical addresses for this 517 * type of transaction. 518 */ 519 if ((inccb->ccb_h.flags & CAM_DATA_MASK) != 520 CAM_DATA_VADDR) { 521 error = EINVAL; 522 break; 523 } 524 525 /* 526 * Save this in case the caller had it set to 527 * something in particular. 528 */ 529 old_path = inccb->ccb_h.path; 530 531 /* 532 * We really don't need a path for the matching 533 * code. The path is needed because of the 534 * debugging statements in xpt_action(). They 535 * assume that the CCB has a valid path. 536 */ 537 inccb->ccb_h.path = xpt_periph->path; 538 539 bzero(&mapinfo, sizeof(mapinfo)); 540 541 /* 542 * Map the pattern and match buffers into kernel 543 * virtual address space. 544 */ 545 error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); 546 547 if (error) { 548 inccb->ccb_h.path = old_path; 549 break; 550 } 551 552 /* 553 * This is an immediate CCB, we can send it on directly. 554 */ 555 xpt_action(inccb); 556 557 /* 558 * Map the buffers back into user space. 559 */ 560 cam_periph_unmapmem(inccb, &mapinfo); 561 562 inccb->ccb_h.path = old_path; 563 564 error = 0; 565 break; 566 } 567 default: 568 error = ENOTSUP; 569 break; 570 } 571 xpt_release_bus(bus); 572 break; 573 } 574 /* 575 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 576 * with the periphal driver name and unit name filled in. The other 577 * fields don't really matter as input. The passthrough driver name 578 * ("pass"), and unit number are passed back in the ccb. The current 579 * device generation number, and the index into the device peripheral 580 * driver list, and the status are also passed back. Note that 581 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 582 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 583 * (or rather should be) impossible for the device peripheral driver 584 * list to change since we look at the whole thing in one pass, and 585 * we do it with lock protection. 586 * 587 */ 588 case CAMGETPASSTHRU: { 589 union ccb *ccb; 590 struct cam_periph *periph; 591 struct periph_driver **p_drv; 592 char *name; 593 u_int unit; 594 int base_periph_found; 595 596 ccb = (union ccb *)addr; 597 unit = ccb->cgdl.unit_number; 598 name = ccb->cgdl.periph_name; 599 base_periph_found = 0; 600 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 601 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 602 ccb->csio.bio = NULL; 603 #endif 604 605 /* 606 * Sanity check -- make sure we don't get a null peripheral 607 * driver name. 608 */ 609 if (*ccb->cgdl.periph_name == '\0') { 610 error = EINVAL; 611 break; 612 } 613 614 /* Keep the list from changing while we traverse it */ 615 xpt_lock_buses(); 616 617 /* first find our driver in the list of drivers */ 618 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 619 if (strcmp((*p_drv)->driver_name, name) == 0) 620 break; 621 622 if (*p_drv == NULL) { 623 xpt_unlock_buses(); 624 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 625 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 626 *ccb->cgdl.periph_name = '\0'; 627 ccb->cgdl.unit_number = 0; 628 error = ENOENT; 629 break; 630 } 631 632 /* 633 * Run through every peripheral instance of this driver 634 * and check to see whether it matches the unit passed 635 * in by the user. If it does, get out of the loops and 636 * find the passthrough driver associated with that 637 * peripheral driver. 638 */ 639 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 640 periph = TAILQ_NEXT(periph, unit_links)) { 641 642 if (periph->unit_number == unit) 643 break; 644 } 645 /* 646 * If we found the peripheral driver that the user passed 647 * in, go through all of the peripheral drivers for that 648 * particular device and look for a passthrough driver. 649 */ 650 if (periph != NULL) { 651 struct cam_ed *device; 652 int i; 653 654 base_periph_found = 1; 655 device = periph->path->device; 656 for (i = 0, periph = SLIST_FIRST(&device->periphs); 657 periph != NULL; 658 periph = SLIST_NEXT(periph, periph_links), i++) { 659 /* 660 * Check to see whether we have a 661 * passthrough device or not. 662 */ 663 if (strcmp(periph->periph_name, "pass") == 0) { 664 /* 665 * Fill in the getdevlist fields. 666 */ 667 strcpy(ccb->cgdl.periph_name, 668 periph->periph_name); 669 ccb->cgdl.unit_number = 670 periph->unit_number; 671 if (SLIST_NEXT(periph, periph_links)) 672 ccb->cgdl.status = 673 CAM_GDEVLIST_MORE_DEVS; 674 else 675 ccb->cgdl.status = 676 CAM_GDEVLIST_LAST_DEVICE; 677 ccb->cgdl.generation = 678 device->generation; 679 ccb->cgdl.index = i; 680 /* 681 * Fill in some CCB header fields 682 * that the user may want. 683 */ 684 ccb->ccb_h.path_id = 685 periph->path->bus->path_id; 686 ccb->ccb_h.target_id = 687 periph->path->target->target_id; 688 ccb->ccb_h.target_lun = 689 periph->path->device->lun_id; 690 ccb->ccb_h.status = CAM_REQ_CMP; 691 break; 692 } 693 } 694 } 695 696 /* 697 * If the periph is null here, one of two things has 698 * happened. The first possibility is that we couldn't 699 * find the unit number of the particular peripheral driver 700 * that the user is asking about. e.g. the user asks for 701 * the passthrough driver for "da11". We find the list of 702 * "da" peripherals all right, but there is no unit 11. 703 * The other possibility is that we went through the list 704 * of peripheral drivers attached to the device structure, 705 * but didn't find one with the name "pass". Either way, 706 * we return ENOENT, since we couldn't find something. 707 */ 708 if (periph == NULL) { 709 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 710 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 711 *ccb->cgdl.periph_name = '\0'; 712 ccb->cgdl.unit_number = 0; 713 error = ENOENT; 714 /* 715 * It is unfortunate that this is even necessary, 716 * but there are many, many clueless users out there. 717 * If this is true, the user is looking for the 718 * passthrough driver, but doesn't have one in his 719 * kernel. 720 */ 721 if (base_periph_found == 1) { 722 printf("xptioctl: pass driver is not in the " 723 "kernel\n"); 724 printf("xptioctl: put \"device pass\" in " 725 "your kernel config file\n"); 726 } 727 } 728 xpt_unlock_buses(); 729 break; 730 } 731 default: 732 error = ENOTTY; 733 break; 734 } 735 736 return(error); 737 } 738 739 static int 740 cam_module_event_handler(module_t mod, int what, void *arg) 741 { 742 int error; 743 744 switch (what) { 745 case MOD_LOAD: 746 if ((error = xpt_init(NULL)) != 0) 747 return (error); 748 break; 749 case MOD_UNLOAD: 750 return EBUSY; 751 default: 752 return EOPNOTSUPP; 753 } 754 755 return 0; 756 } 757 758 static struct xpt_proto * 759 xpt_proto_find(cam_proto proto) 760 { 761 struct xpt_proto **pp; 762 763 SET_FOREACH(pp, cam_xpt_proto_set) { 764 if ((*pp)->proto == proto) 765 return *pp; 766 } 767 768 return NULL; 769 } 770 771 static void 772 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 773 { 774 775 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 776 xpt_free_path(done_ccb->ccb_h.path); 777 xpt_free_ccb(done_ccb); 778 } else { 779 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 780 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 781 } 782 xpt_release_boot(); 783 } 784 785 /* thread to handle bus rescans */ 786 static void 787 xpt_scanner_thread(void *dummy) 788 { 789 union ccb *ccb; 790 struct cam_path path; 791 792 xpt_lock_buses(); 793 for (;;) { 794 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 795 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 796 "-", 0); 797 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 798 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 799 xpt_unlock_buses(); 800 801 /* 802 * Since lock can be dropped inside and path freed 803 * by completion callback even before return here, 804 * take our own path copy for reference. 805 */ 806 xpt_copy_path(&path, ccb->ccb_h.path); 807 xpt_path_lock(&path); 808 xpt_action(ccb); 809 xpt_path_unlock(&path); 810 xpt_release_path(&path); 811 812 xpt_lock_buses(); 813 } 814 } 815 } 816 817 void 818 xpt_rescan(union ccb *ccb) 819 { 820 struct ccb_hdr *hdr; 821 822 /* Prepare request */ 823 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 824 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 825 ccb->ccb_h.func_code = XPT_SCAN_BUS; 826 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 827 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 828 ccb->ccb_h.func_code = XPT_SCAN_TGT; 829 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 830 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 831 ccb->ccb_h.func_code = XPT_SCAN_LUN; 832 else { 833 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 834 xpt_free_path(ccb->ccb_h.path); 835 xpt_free_ccb(ccb); 836 return; 837 } 838 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 839 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 840 xpt_action_name(ccb->ccb_h.func_code))); 841 842 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 843 ccb->ccb_h.cbfcnp = xpt_rescan_done; 844 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 845 /* Don't make duplicate entries for the same paths. */ 846 xpt_lock_buses(); 847 if (ccb->ccb_h.ppriv_ptr1 == NULL) { 848 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { 849 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { 850 wakeup(&xsoftc.ccb_scanq); 851 xpt_unlock_buses(); 852 xpt_print(ccb->ccb_h.path, "rescan already queued\n"); 853 xpt_free_path(ccb->ccb_h.path); 854 xpt_free_ccb(ccb); 855 return; 856 } 857 } 858 } 859 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 860 xsoftc.buses_to_config++; 861 wakeup(&xsoftc.ccb_scanq); 862 xpt_unlock_buses(); 863 } 864 865 /* Functions accessed by the peripheral drivers */ 866 static int 867 xpt_init(void *dummy) 868 { 869 struct cam_sim *xpt_sim; 870 struct cam_path *path; 871 struct cam_devq *devq; 872 cam_status status; 873 int error, i; 874 875 TAILQ_INIT(&xsoftc.xpt_busses); 876 TAILQ_INIT(&xsoftc.ccb_scanq); 877 STAILQ_INIT(&xsoftc.highpowerq); 878 xsoftc.num_highpower = CAM_MAX_HIGHPOWER; 879 880 mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); 881 mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); 882 xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, 883 taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); 884 885 #ifdef CAM_BOOT_DELAY 886 /* 887 * Override this value at compile time to assist our users 888 * who don't use loader to boot a kernel. 889 */ 890 xsoftc.boot_delay = CAM_BOOT_DELAY; 891 #endif 892 /* 893 * The xpt layer is, itself, the equivalent of a SIM. 894 * Allow 16 ccbs in the ccb pool for it. This should 895 * give decent parallelism when we probe busses and 896 * perform other XPT functions. 897 */ 898 devq = cam_simq_alloc(16); 899 xpt_sim = cam_sim_alloc(xptaction, 900 xptpoll, 901 "xpt", 902 /*softc*/NULL, 903 /*unit*/0, 904 /*mtx*/&xsoftc.xpt_lock, 905 /*max_dev_transactions*/0, 906 /*max_tagged_dev_transactions*/0, 907 devq); 908 if (xpt_sim == NULL) 909 return (ENOMEM); 910 911 mtx_lock(&xsoftc.xpt_lock); 912 if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { 913 mtx_unlock(&xsoftc.xpt_lock); 914 printf("xpt_init: xpt_bus_register failed with status %#x," 915 " failing attach\n", status); 916 return (EINVAL); 917 } 918 mtx_unlock(&xsoftc.xpt_lock); 919 920 /* 921 * Looking at the XPT from the SIM layer, the XPT is 922 * the equivalent of a peripheral driver. Allocate 923 * a peripheral driver entry for us. 924 */ 925 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 926 CAM_TARGET_WILDCARD, 927 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 928 printf("xpt_init: xpt_create_path failed with status %#x," 929 " failing attach\n", status); 930 return (EINVAL); 931 } 932 xpt_path_lock(path); 933 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 934 path, NULL, 0, xpt_sim); 935 xpt_path_unlock(path); 936 xpt_free_path(path); 937 938 if (cam_num_doneqs < 1) 939 cam_num_doneqs = 1 + mp_ncpus / 6; 940 else if (cam_num_doneqs > MAXCPU) 941 cam_num_doneqs = MAXCPU; 942 for (i = 0; i < cam_num_doneqs; i++) { 943 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, 944 MTX_DEF); 945 STAILQ_INIT(&cam_doneqs[i].cam_doneq); 946 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], 947 &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); 948 if (error != 0) { 949 cam_num_doneqs = i; 950 break; 951 } 952 } 953 if (cam_num_doneqs < 1) { 954 printf("xpt_init: Cannot init completion queues " 955 "- failing attach\n"); 956 return (ENOMEM); 957 } 958 /* 959 * Register a callback for when interrupts are enabled. 960 */ 961 xsoftc.xpt_config_hook = 962 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 963 M_CAMXPT, M_NOWAIT | M_ZERO); 964 if (xsoftc.xpt_config_hook == NULL) { 965 printf("xpt_init: Cannot malloc config hook " 966 "- failing attach\n"); 967 return (ENOMEM); 968 } 969 xsoftc.xpt_config_hook->ich_func = xpt_config; 970 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { 971 free (xsoftc.xpt_config_hook, M_CAMXPT); 972 printf("xpt_init: config_intrhook_establish failed " 973 "- failing attach\n"); 974 } 975 976 return (0); 977 } 978 979 static cam_status 980 xptregister(struct cam_periph *periph, void *arg) 981 { 982 struct cam_sim *xpt_sim; 983 984 if (periph == NULL) { 985 printf("xptregister: periph was NULL!!\n"); 986 return(CAM_REQ_CMP_ERR); 987 } 988 989 xpt_sim = (struct cam_sim *)arg; 990 xpt_sim->softc = periph; 991 xpt_periph = periph; 992 periph->softc = NULL; 993 994 return(CAM_REQ_CMP); 995 } 996 997 int32_t 998 xpt_add_periph(struct cam_periph *periph) 999 { 1000 struct cam_ed *device; 1001 int32_t status; 1002 1003 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 1004 device = periph->path->device; 1005 status = CAM_REQ_CMP; 1006 if (device != NULL) { 1007 mtx_lock(&device->target->bus->eb_mtx); 1008 device->generation++; 1009 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 1010 mtx_unlock(&device->target->bus->eb_mtx); 1011 atomic_add_32(&xsoftc.xpt_generation, 1); 1012 } 1013 1014 return (status); 1015 } 1016 1017 void 1018 xpt_remove_periph(struct cam_periph *periph) 1019 { 1020 struct cam_ed *device; 1021 1022 device = periph->path->device; 1023 if (device != NULL) { 1024 mtx_lock(&device->target->bus->eb_mtx); 1025 device->generation++; 1026 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1027 mtx_unlock(&device->target->bus->eb_mtx); 1028 atomic_add_32(&xsoftc.xpt_generation, 1); 1029 } 1030 } 1031 1032 1033 void 1034 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1035 { 1036 struct cam_path *path = periph->path; 1037 struct xpt_proto *proto; 1038 1039 cam_periph_assert(periph, MA_OWNED); 1040 periph->flags |= CAM_PERIPH_ANNOUNCED; 1041 1042 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1043 periph->periph_name, periph->unit_number, 1044 path->bus->sim->sim_name, 1045 path->bus->sim->unit_number, 1046 path->bus->sim->bus_id, 1047 path->bus->path_id, 1048 path->target->target_id, 1049 (uintmax_t)path->device->lun_id); 1050 printf("%s%d: ", periph->periph_name, periph->unit_number); 1051 proto = xpt_proto_find(path->device->protocol); 1052 if (proto) 1053 proto->ops->announce(path->device); 1054 else 1055 printf("%s%d: Unknown protocol device %d\n", 1056 periph->periph_name, periph->unit_number, 1057 path->device->protocol); 1058 if (path->device->serial_num_len > 0) { 1059 /* Don't wrap the screen - print only the first 60 chars */ 1060 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1061 periph->unit_number, path->device->serial_num); 1062 } 1063 /* Announce transport details. */ 1064 path->bus->xport->ops->announce(periph); 1065 /* Announce command queueing. */ 1066 if (path->device->inq_flags & SID_CmdQue 1067 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1068 printf("%s%d: Command Queueing enabled\n", 1069 periph->periph_name, periph->unit_number); 1070 } 1071 /* Announce caller's details if they've passed in. */ 1072 if (announce_string != NULL) 1073 printf("%s%d: %s\n", periph->periph_name, 1074 periph->unit_number, announce_string); 1075 } 1076 1077 void 1078 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) 1079 { 1080 if (quirks != 0) { 1081 printf("%s%d: quirks=0x%b\n", periph->periph_name, 1082 periph->unit_number, quirks, bit_string); 1083 } 1084 } 1085 1086 void 1087 xpt_denounce_periph(struct cam_periph *periph) 1088 { 1089 struct cam_path *path = periph->path; 1090 struct xpt_proto *proto; 1091 1092 cam_periph_assert(periph, MA_OWNED); 1093 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1094 periph->periph_name, periph->unit_number, 1095 path->bus->sim->sim_name, 1096 path->bus->sim->unit_number, 1097 path->bus->sim->bus_id, 1098 path->bus->path_id, 1099 path->target->target_id, 1100 (uintmax_t)path->device->lun_id); 1101 printf("%s%d: ", periph->periph_name, periph->unit_number); 1102 proto = xpt_proto_find(path->device->protocol); 1103 if (proto) 1104 proto->ops->denounce(path->device); 1105 else 1106 printf("%s%d: Unknown protocol device %d\n", 1107 periph->periph_name, periph->unit_number, 1108 path->device->protocol); 1109 if (path->device->serial_num_len > 0) 1110 printf(" s/n %.60s", path->device->serial_num); 1111 printf(" detached\n"); 1112 } 1113 1114 1115 int 1116 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) 1117 { 1118 int ret = -1, l, o; 1119 struct ccb_dev_advinfo cdai; 1120 struct scsi_vpd_id_descriptor *idd; 1121 1122 xpt_path_assert(path, MA_OWNED); 1123 1124 memset(&cdai, 0, sizeof(cdai)); 1125 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 1126 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 1127 cdai.flags = CDAI_FLAG_NONE; 1128 cdai.bufsiz = len; 1129 1130 if (!strcmp(attr, "GEOM::ident")) 1131 cdai.buftype = CDAI_TYPE_SERIAL_NUM; 1132 else if (!strcmp(attr, "GEOM::physpath")) 1133 cdai.buftype = CDAI_TYPE_PHYS_PATH; 1134 else if (strcmp(attr, "GEOM::lunid") == 0 || 1135 strcmp(attr, "GEOM::lunname") == 0) { 1136 cdai.buftype = CDAI_TYPE_SCSI_DEVID; 1137 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; 1138 } else 1139 goto out; 1140 1141 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); 1142 if (cdai.buf == NULL) { 1143 ret = ENOMEM; 1144 goto out; 1145 } 1146 xpt_action((union ccb *)&cdai); /* can only be synchronous */ 1147 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 1148 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 1149 if (cdai.provsiz == 0) 1150 goto out; 1151 if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) { 1152 if (strcmp(attr, "GEOM::lunid") == 0) { 1153 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1154 cdai.provsiz, scsi_devid_is_lun_naa); 1155 if (idd == NULL) 1156 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1157 cdai.provsiz, scsi_devid_is_lun_eui64); 1158 if (idd == NULL) 1159 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1160 cdai.provsiz, scsi_devid_is_lun_uuid); 1161 if (idd == NULL) 1162 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1163 cdai.provsiz, scsi_devid_is_lun_md5); 1164 } else 1165 idd = NULL; 1166 if (idd == NULL) 1167 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1168 cdai.provsiz, scsi_devid_is_lun_t10); 1169 if (idd == NULL) 1170 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1171 cdai.provsiz, scsi_devid_is_lun_name); 1172 if (idd == NULL) 1173 goto out; 1174 ret = 0; 1175 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { 1176 if (idd->length < len) { 1177 for (l = 0; l < idd->length; l++) 1178 buf[l] = idd->identifier[l] ? 1179 idd->identifier[l] : ' '; 1180 buf[l] = 0; 1181 } else 1182 ret = EFAULT; 1183 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { 1184 l = strnlen(idd->identifier, idd->length); 1185 if (l < len) { 1186 bcopy(idd->identifier, buf, l); 1187 buf[l] = 0; 1188 } else 1189 ret = EFAULT; 1190 } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID 1191 && idd->identifier[0] == 0x10) { 1192 if ((idd->length - 2) * 2 + 4 < len) { 1193 for (l = 2, o = 0; l < idd->length; l++) { 1194 if (l == 6 || l == 8 || l == 10 || l == 12) 1195 o += sprintf(buf + o, "-"); 1196 o += sprintf(buf + o, "%02x", 1197 idd->identifier[l]); 1198 } 1199 } else 1200 ret = EFAULT; 1201 } else { 1202 if (idd->length * 2 < len) { 1203 for (l = 0; l < idd->length; l++) 1204 sprintf(buf + l * 2, "%02x", 1205 idd->identifier[l]); 1206 } else 1207 ret = EFAULT; 1208 } 1209 } else { 1210 ret = 0; 1211 if (strlcpy(buf, cdai.buf, len) >= len) 1212 ret = EFAULT; 1213 } 1214 1215 out: 1216 if (cdai.buf != NULL) 1217 free(cdai.buf, M_CAMXPT); 1218 return ret; 1219 } 1220 1221 static dev_match_ret 1222 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1223 struct cam_eb *bus) 1224 { 1225 dev_match_ret retval; 1226 u_int i; 1227 1228 retval = DM_RET_NONE; 1229 1230 /* 1231 * If we aren't given something to match against, that's an error. 1232 */ 1233 if (bus == NULL) 1234 return(DM_RET_ERROR); 1235 1236 /* 1237 * If there are no match entries, then this bus matches no 1238 * matter what. 1239 */ 1240 if ((patterns == NULL) || (num_patterns == 0)) 1241 return(DM_RET_DESCEND | DM_RET_COPY); 1242 1243 for (i = 0; i < num_patterns; i++) { 1244 struct bus_match_pattern *cur_pattern; 1245 1246 /* 1247 * If the pattern in question isn't for a bus node, we 1248 * aren't interested. However, we do indicate to the 1249 * calling routine that we should continue descending the 1250 * tree, since the user wants to match against lower-level 1251 * EDT elements. 1252 */ 1253 if (patterns[i].type != DEV_MATCH_BUS) { 1254 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1255 retval |= DM_RET_DESCEND; 1256 continue; 1257 } 1258 1259 cur_pattern = &patterns[i].pattern.bus_pattern; 1260 1261 /* 1262 * If they want to match any bus node, we give them any 1263 * device node. 1264 */ 1265 if (cur_pattern->flags == BUS_MATCH_ANY) { 1266 /* set the copy flag */ 1267 retval |= DM_RET_COPY; 1268 1269 /* 1270 * If we've already decided on an action, go ahead 1271 * and return. 1272 */ 1273 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1274 return(retval); 1275 } 1276 1277 /* 1278 * Not sure why someone would do this... 1279 */ 1280 if (cur_pattern->flags == BUS_MATCH_NONE) 1281 continue; 1282 1283 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1284 && (cur_pattern->path_id != bus->path_id)) 1285 continue; 1286 1287 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1288 && (cur_pattern->bus_id != bus->sim->bus_id)) 1289 continue; 1290 1291 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1292 && (cur_pattern->unit_number != bus->sim->unit_number)) 1293 continue; 1294 1295 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1296 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1297 DEV_IDLEN) != 0)) 1298 continue; 1299 1300 /* 1301 * If we get to this point, the user definitely wants 1302 * information on this bus. So tell the caller to copy the 1303 * data out. 1304 */ 1305 retval |= DM_RET_COPY; 1306 1307 /* 1308 * If the return action has been set to descend, then we 1309 * know that we've already seen a non-bus matching 1310 * expression, therefore we need to further descend the tree. 1311 * This won't change by continuing around the loop, so we 1312 * go ahead and return. If we haven't seen a non-bus 1313 * matching expression, we keep going around the loop until 1314 * we exhaust the matching expressions. We'll set the stop 1315 * flag once we fall out of the loop. 1316 */ 1317 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1318 return(retval); 1319 } 1320 1321 /* 1322 * If the return action hasn't been set to descend yet, that means 1323 * we haven't seen anything other than bus matching patterns. So 1324 * tell the caller to stop descending the tree -- the user doesn't 1325 * want to match against lower level tree elements. 1326 */ 1327 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1328 retval |= DM_RET_STOP; 1329 1330 return(retval); 1331 } 1332 1333 static dev_match_ret 1334 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1335 struct cam_ed *device) 1336 { 1337 dev_match_ret retval; 1338 u_int i; 1339 1340 retval = DM_RET_NONE; 1341 1342 /* 1343 * If we aren't given something to match against, that's an error. 1344 */ 1345 if (device == NULL) 1346 return(DM_RET_ERROR); 1347 1348 /* 1349 * If there are no match entries, then this device matches no 1350 * matter what. 1351 */ 1352 if ((patterns == NULL) || (num_patterns == 0)) 1353 return(DM_RET_DESCEND | DM_RET_COPY); 1354 1355 for (i = 0; i < num_patterns; i++) { 1356 struct device_match_pattern *cur_pattern; 1357 struct scsi_vpd_device_id *device_id_page; 1358 1359 /* 1360 * If the pattern in question isn't for a device node, we 1361 * aren't interested. 1362 */ 1363 if (patterns[i].type != DEV_MATCH_DEVICE) { 1364 if ((patterns[i].type == DEV_MATCH_PERIPH) 1365 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1366 retval |= DM_RET_DESCEND; 1367 continue; 1368 } 1369 1370 cur_pattern = &patterns[i].pattern.device_pattern; 1371 1372 /* Error out if mutually exclusive options are specified. */ 1373 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1374 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1375 return(DM_RET_ERROR); 1376 1377 /* 1378 * If they want to match any device node, we give them any 1379 * device node. 1380 */ 1381 if (cur_pattern->flags == DEV_MATCH_ANY) 1382 goto copy_dev_node; 1383 1384 /* 1385 * Not sure why someone would do this... 1386 */ 1387 if (cur_pattern->flags == DEV_MATCH_NONE) 1388 continue; 1389 1390 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1391 && (cur_pattern->path_id != device->target->bus->path_id)) 1392 continue; 1393 1394 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1395 && (cur_pattern->target_id != device->target->target_id)) 1396 continue; 1397 1398 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1399 && (cur_pattern->target_lun != device->lun_id)) 1400 continue; 1401 1402 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1403 && (cam_quirkmatch((caddr_t)&device->inq_data, 1404 (caddr_t)&cur_pattern->data.inq_pat, 1405 1, sizeof(cur_pattern->data.inq_pat), 1406 scsi_static_inquiry_match) == NULL)) 1407 continue; 1408 1409 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1410 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1411 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1412 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1413 device->device_id_len 1414 - SVPD_DEVICE_ID_HDR_LEN, 1415 cur_pattern->data.devid_pat.id, 1416 cur_pattern->data.devid_pat.id_len) != 0)) 1417 continue; 1418 1419 copy_dev_node: 1420 /* 1421 * If we get to this point, the user definitely wants 1422 * information on this device. So tell the caller to copy 1423 * the data out. 1424 */ 1425 retval |= DM_RET_COPY; 1426 1427 /* 1428 * If the return action has been set to descend, then we 1429 * know that we've already seen a peripheral matching 1430 * expression, therefore we need to further descend the tree. 1431 * This won't change by continuing around the loop, so we 1432 * go ahead and return. If we haven't seen a peripheral 1433 * matching expression, we keep going around the loop until 1434 * we exhaust the matching expressions. We'll set the stop 1435 * flag once we fall out of the loop. 1436 */ 1437 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1438 return(retval); 1439 } 1440 1441 /* 1442 * If the return action hasn't been set to descend yet, that means 1443 * we haven't seen any peripheral matching patterns. So tell the 1444 * caller to stop descending the tree -- the user doesn't want to 1445 * match against lower level tree elements. 1446 */ 1447 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1448 retval |= DM_RET_STOP; 1449 1450 return(retval); 1451 } 1452 1453 /* 1454 * Match a single peripheral against any number of match patterns. 1455 */ 1456 static dev_match_ret 1457 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1458 struct cam_periph *periph) 1459 { 1460 dev_match_ret retval; 1461 u_int i; 1462 1463 /* 1464 * If we aren't given something to match against, that's an error. 1465 */ 1466 if (periph == NULL) 1467 return(DM_RET_ERROR); 1468 1469 /* 1470 * If there are no match entries, then this peripheral matches no 1471 * matter what. 1472 */ 1473 if ((patterns == NULL) || (num_patterns == 0)) 1474 return(DM_RET_STOP | DM_RET_COPY); 1475 1476 /* 1477 * There aren't any nodes below a peripheral node, so there's no 1478 * reason to descend the tree any further. 1479 */ 1480 retval = DM_RET_STOP; 1481 1482 for (i = 0; i < num_patterns; i++) { 1483 struct periph_match_pattern *cur_pattern; 1484 1485 /* 1486 * If the pattern in question isn't for a peripheral, we 1487 * aren't interested. 1488 */ 1489 if (patterns[i].type != DEV_MATCH_PERIPH) 1490 continue; 1491 1492 cur_pattern = &patterns[i].pattern.periph_pattern; 1493 1494 /* 1495 * If they want to match on anything, then we will do so. 1496 */ 1497 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 1498 /* set the copy flag */ 1499 retval |= DM_RET_COPY; 1500 1501 /* 1502 * We've already set the return action to stop, 1503 * since there are no nodes below peripherals in 1504 * the tree. 1505 */ 1506 return(retval); 1507 } 1508 1509 /* 1510 * Not sure why someone would do this... 1511 */ 1512 if (cur_pattern->flags == PERIPH_MATCH_NONE) 1513 continue; 1514 1515 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1516 && (cur_pattern->path_id != periph->path->bus->path_id)) 1517 continue; 1518 1519 /* 1520 * For the target and lun id's, we have to make sure the 1521 * target and lun pointers aren't NULL. The xpt peripheral 1522 * has a wildcard target and device. 1523 */ 1524 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1525 && ((periph->path->target == NULL) 1526 ||(cur_pattern->target_id != periph->path->target->target_id))) 1527 continue; 1528 1529 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1530 && ((periph->path->device == NULL) 1531 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1532 continue; 1533 1534 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1535 && (cur_pattern->unit_number != periph->unit_number)) 1536 continue; 1537 1538 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1539 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1540 DEV_IDLEN) != 0)) 1541 continue; 1542 1543 /* 1544 * If we get to this point, the user definitely wants 1545 * information on this peripheral. So tell the caller to 1546 * copy the data out. 1547 */ 1548 retval |= DM_RET_COPY; 1549 1550 /* 1551 * The return action has already been set to stop, since 1552 * peripherals don't have any nodes below them in the EDT. 1553 */ 1554 return(retval); 1555 } 1556 1557 /* 1558 * If we get to this point, the peripheral that was passed in 1559 * doesn't match any of the patterns. 1560 */ 1561 return(retval); 1562 } 1563 1564 static int 1565 xptedtbusfunc(struct cam_eb *bus, void *arg) 1566 { 1567 struct ccb_dev_match *cdm; 1568 struct cam_et *target; 1569 dev_match_ret retval; 1570 1571 cdm = (struct ccb_dev_match *)arg; 1572 1573 /* 1574 * If our position is for something deeper in the tree, that means 1575 * that we've already seen this node. So, we keep going down. 1576 */ 1577 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1578 && (cdm->pos.cookie.bus == bus) 1579 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1580 && (cdm->pos.cookie.target != NULL)) 1581 retval = DM_RET_DESCEND; 1582 else 1583 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1584 1585 /* 1586 * If we got an error, bail out of the search. 1587 */ 1588 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1589 cdm->status = CAM_DEV_MATCH_ERROR; 1590 return(0); 1591 } 1592 1593 /* 1594 * If the copy flag is set, copy this bus out. 1595 */ 1596 if (retval & DM_RET_COPY) { 1597 int spaceleft, j; 1598 1599 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1600 sizeof(struct dev_match_result)); 1601 1602 /* 1603 * If we don't have enough space to put in another 1604 * match result, save our position and tell the 1605 * user there are more devices to check. 1606 */ 1607 if (spaceleft < sizeof(struct dev_match_result)) { 1608 bzero(&cdm->pos, sizeof(cdm->pos)); 1609 cdm->pos.position_type = 1610 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1611 1612 cdm->pos.cookie.bus = bus; 1613 cdm->pos.generations[CAM_BUS_GENERATION]= 1614 xsoftc.bus_generation; 1615 cdm->status = CAM_DEV_MATCH_MORE; 1616 return(0); 1617 } 1618 j = cdm->num_matches; 1619 cdm->num_matches++; 1620 cdm->matches[j].type = DEV_MATCH_BUS; 1621 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1622 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1623 cdm->matches[j].result.bus_result.unit_number = 1624 bus->sim->unit_number; 1625 strncpy(cdm->matches[j].result.bus_result.dev_name, 1626 bus->sim->sim_name, DEV_IDLEN); 1627 } 1628 1629 /* 1630 * If the user is only interested in busses, there's no 1631 * reason to descend to the next level in the tree. 1632 */ 1633 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1634 return(1); 1635 1636 /* 1637 * If there is a target generation recorded, check it to 1638 * make sure the target list hasn't changed. 1639 */ 1640 mtx_lock(&bus->eb_mtx); 1641 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1642 && (cdm->pos.cookie.bus == bus) 1643 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1644 && (cdm->pos.cookie.target != NULL)) { 1645 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1646 bus->generation)) { 1647 mtx_unlock(&bus->eb_mtx); 1648 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1649 return (0); 1650 } 1651 target = (struct cam_et *)cdm->pos.cookie.target; 1652 target->refcount++; 1653 } else 1654 target = NULL; 1655 mtx_unlock(&bus->eb_mtx); 1656 1657 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1658 } 1659 1660 static int 1661 xptedttargetfunc(struct cam_et *target, void *arg) 1662 { 1663 struct ccb_dev_match *cdm; 1664 struct cam_eb *bus; 1665 struct cam_ed *device; 1666 1667 cdm = (struct ccb_dev_match *)arg; 1668 bus = target->bus; 1669 1670 /* 1671 * If there is a device list generation recorded, check it to 1672 * make sure the device list hasn't changed. 1673 */ 1674 mtx_lock(&bus->eb_mtx); 1675 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1676 && (cdm->pos.cookie.bus == bus) 1677 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1678 && (cdm->pos.cookie.target == target) 1679 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1680 && (cdm->pos.cookie.device != NULL)) { 1681 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1682 target->generation) { 1683 mtx_unlock(&bus->eb_mtx); 1684 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1685 return(0); 1686 } 1687 device = (struct cam_ed *)cdm->pos.cookie.device; 1688 device->refcount++; 1689 } else 1690 device = NULL; 1691 mtx_unlock(&bus->eb_mtx); 1692 1693 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1694 } 1695 1696 static int 1697 xptedtdevicefunc(struct cam_ed *device, void *arg) 1698 { 1699 struct cam_eb *bus; 1700 struct cam_periph *periph; 1701 struct ccb_dev_match *cdm; 1702 dev_match_ret retval; 1703 1704 cdm = (struct ccb_dev_match *)arg; 1705 bus = device->target->bus; 1706 1707 /* 1708 * If our position is for something deeper in the tree, that means 1709 * that we've already seen this node. So, we keep going down. 1710 */ 1711 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1712 && (cdm->pos.cookie.device == device) 1713 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1714 && (cdm->pos.cookie.periph != NULL)) 1715 retval = DM_RET_DESCEND; 1716 else 1717 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1718 device); 1719 1720 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1721 cdm->status = CAM_DEV_MATCH_ERROR; 1722 return(0); 1723 } 1724 1725 /* 1726 * If the copy flag is set, copy this device out. 1727 */ 1728 if (retval & DM_RET_COPY) { 1729 int spaceleft, j; 1730 1731 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1732 sizeof(struct dev_match_result)); 1733 1734 /* 1735 * If we don't have enough space to put in another 1736 * match result, save our position and tell the 1737 * user there are more devices to check. 1738 */ 1739 if (spaceleft < sizeof(struct dev_match_result)) { 1740 bzero(&cdm->pos, sizeof(cdm->pos)); 1741 cdm->pos.position_type = 1742 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1743 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1744 1745 cdm->pos.cookie.bus = device->target->bus; 1746 cdm->pos.generations[CAM_BUS_GENERATION]= 1747 xsoftc.bus_generation; 1748 cdm->pos.cookie.target = device->target; 1749 cdm->pos.generations[CAM_TARGET_GENERATION] = 1750 device->target->bus->generation; 1751 cdm->pos.cookie.device = device; 1752 cdm->pos.generations[CAM_DEV_GENERATION] = 1753 device->target->generation; 1754 cdm->status = CAM_DEV_MATCH_MORE; 1755 return(0); 1756 } 1757 j = cdm->num_matches; 1758 cdm->num_matches++; 1759 cdm->matches[j].type = DEV_MATCH_DEVICE; 1760 cdm->matches[j].result.device_result.path_id = 1761 device->target->bus->path_id; 1762 cdm->matches[j].result.device_result.target_id = 1763 device->target->target_id; 1764 cdm->matches[j].result.device_result.target_lun = 1765 device->lun_id; 1766 cdm->matches[j].result.device_result.protocol = 1767 device->protocol; 1768 bcopy(&device->inq_data, 1769 &cdm->matches[j].result.device_result.inq_data, 1770 sizeof(struct scsi_inquiry_data)); 1771 bcopy(&device->ident_data, 1772 &cdm->matches[j].result.device_result.ident_data, 1773 sizeof(struct ata_params)); 1774 1775 /* Let the user know whether this device is unconfigured */ 1776 if (device->flags & CAM_DEV_UNCONFIGURED) 1777 cdm->matches[j].result.device_result.flags = 1778 DEV_RESULT_UNCONFIGURED; 1779 else 1780 cdm->matches[j].result.device_result.flags = 1781 DEV_RESULT_NOFLAG; 1782 } 1783 1784 /* 1785 * If the user isn't interested in peripherals, don't descend 1786 * the tree any further. 1787 */ 1788 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1789 return(1); 1790 1791 /* 1792 * If there is a peripheral list generation recorded, make sure 1793 * it hasn't changed. 1794 */ 1795 xpt_lock_buses(); 1796 mtx_lock(&bus->eb_mtx); 1797 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1798 && (cdm->pos.cookie.bus == bus) 1799 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1800 && (cdm->pos.cookie.target == device->target) 1801 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1802 && (cdm->pos.cookie.device == device) 1803 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1804 && (cdm->pos.cookie.periph != NULL)) { 1805 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1806 device->generation) { 1807 mtx_unlock(&bus->eb_mtx); 1808 xpt_unlock_buses(); 1809 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1810 return(0); 1811 } 1812 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1813 periph->refcount++; 1814 } else 1815 periph = NULL; 1816 mtx_unlock(&bus->eb_mtx); 1817 xpt_unlock_buses(); 1818 1819 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1820 } 1821 1822 static int 1823 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1824 { 1825 struct ccb_dev_match *cdm; 1826 dev_match_ret retval; 1827 1828 cdm = (struct ccb_dev_match *)arg; 1829 1830 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1831 1832 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1833 cdm->status = CAM_DEV_MATCH_ERROR; 1834 return(0); 1835 } 1836 1837 /* 1838 * If the copy flag is set, copy this peripheral out. 1839 */ 1840 if (retval & DM_RET_COPY) { 1841 int spaceleft, j; 1842 1843 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1844 sizeof(struct dev_match_result)); 1845 1846 /* 1847 * If we don't have enough space to put in another 1848 * match result, save our position and tell the 1849 * user there are more devices to check. 1850 */ 1851 if (spaceleft < sizeof(struct dev_match_result)) { 1852 bzero(&cdm->pos, sizeof(cdm->pos)); 1853 cdm->pos.position_type = 1854 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1855 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1856 CAM_DEV_POS_PERIPH; 1857 1858 cdm->pos.cookie.bus = periph->path->bus; 1859 cdm->pos.generations[CAM_BUS_GENERATION]= 1860 xsoftc.bus_generation; 1861 cdm->pos.cookie.target = periph->path->target; 1862 cdm->pos.generations[CAM_TARGET_GENERATION] = 1863 periph->path->bus->generation; 1864 cdm->pos.cookie.device = periph->path->device; 1865 cdm->pos.generations[CAM_DEV_GENERATION] = 1866 periph->path->target->generation; 1867 cdm->pos.cookie.periph = periph; 1868 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1869 periph->path->device->generation; 1870 cdm->status = CAM_DEV_MATCH_MORE; 1871 return(0); 1872 } 1873 1874 j = cdm->num_matches; 1875 cdm->num_matches++; 1876 cdm->matches[j].type = DEV_MATCH_PERIPH; 1877 cdm->matches[j].result.periph_result.path_id = 1878 periph->path->bus->path_id; 1879 cdm->matches[j].result.periph_result.target_id = 1880 periph->path->target->target_id; 1881 cdm->matches[j].result.periph_result.target_lun = 1882 periph->path->device->lun_id; 1883 cdm->matches[j].result.periph_result.unit_number = 1884 periph->unit_number; 1885 strncpy(cdm->matches[j].result.periph_result.periph_name, 1886 periph->periph_name, DEV_IDLEN); 1887 } 1888 1889 return(1); 1890 } 1891 1892 static int 1893 xptedtmatch(struct ccb_dev_match *cdm) 1894 { 1895 struct cam_eb *bus; 1896 int ret; 1897 1898 cdm->num_matches = 0; 1899 1900 /* 1901 * Check the bus list generation. If it has changed, the user 1902 * needs to reset everything and start over. 1903 */ 1904 xpt_lock_buses(); 1905 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1906 && (cdm->pos.cookie.bus != NULL)) { 1907 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1908 xsoftc.bus_generation) { 1909 xpt_unlock_buses(); 1910 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1911 return(0); 1912 } 1913 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1914 bus->refcount++; 1915 } else 1916 bus = NULL; 1917 xpt_unlock_buses(); 1918 1919 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1920 1921 /* 1922 * If we get back 0, that means that we had to stop before fully 1923 * traversing the EDT. It also means that one of the subroutines 1924 * has set the status field to the proper value. If we get back 1, 1925 * we've fully traversed the EDT and copied out any matching entries. 1926 */ 1927 if (ret == 1) 1928 cdm->status = CAM_DEV_MATCH_LAST; 1929 1930 return(ret); 1931 } 1932 1933 static int 1934 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1935 { 1936 struct cam_periph *periph; 1937 struct ccb_dev_match *cdm; 1938 1939 cdm = (struct ccb_dev_match *)arg; 1940 1941 xpt_lock_buses(); 1942 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1943 && (cdm->pos.cookie.pdrv == pdrv) 1944 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1945 && (cdm->pos.cookie.periph != NULL)) { 1946 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1947 (*pdrv)->generation) { 1948 xpt_unlock_buses(); 1949 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1950 return(0); 1951 } 1952 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1953 periph->refcount++; 1954 } else 1955 periph = NULL; 1956 xpt_unlock_buses(); 1957 1958 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1959 } 1960 1961 static int 1962 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1963 { 1964 struct ccb_dev_match *cdm; 1965 dev_match_ret retval; 1966 1967 cdm = (struct ccb_dev_match *)arg; 1968 1969 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1970 1971 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1972 cdm->status = CAM_DEV_MATCH_ERROR; 1973 return(0); 1974 } 1975 1976 /* 1977 * If the copy flag is set, copy this peripheral out. 1978 */ 1979 if (retval & DM_RET_COPY) { 1980 int spaceleft, j; 1981 1982 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1983 sizeof(struct dev_match_result)); 1984 1985 /* 1986 * If we don't have enough space to put in another 1987 * match result, save our position and tell the 1988 * user there are more devices to check. 1989 */ 1990 if (spaceleft < sizeof(struct dev_match_result)) { 1991 struct periph_driver **pdrv; 1992 1993 pdrv = NULL; 1994 bzero(&cdm->pos, sizeof(cdm->pos)); 1995 cdm->pos.position_type = 1996 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 1997 CAM_DEV_POS_PERIPH; 1998 1999 /* 2000 * This may look a bit non-sensical, but it is 2001 * actually quite logical. There are very few 2002 * peripheral drivers, and bloating every peripheral 2003 * structure with a pointer back to its parent 2004 * peripheral driver linker set entry would cost 2005 * more in the long run than doing this quick lookup. 2006 */ 2007 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2008 if (strcmp((*pdrv)->driver_name, 2009 periph->periph_name) == 0) 2010 break; 2011 } 2012 2013 if (*pdrv == NULL) { 2014 cdm->status = CAM_DEV_MATCH_ERROR; 2015 return(0); 2016 } 2017 2018 cdm->pos.cookie.pdrv = pdrv; 2019 /* 2020 * The periph generation slot does double duty, as 2021 * does the periph pointer slot. They are used for 2022 * both edt and pdrv lookups and positioning. 2023 */ 2024 cdm->pos.cookie.periph = periph; 2025 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2026 (*pdrv)->generation; 2027 cdm->status = CAM_DEV_MATCH_MORE; 2028 return(0); 2029 } 2030 2031 j = cdm->num_matches; 2032 cdm->num_matches++; 2033 cdm->matches[j].type = DEV_MATCH_PERIPH; 2034 cdm->matches[j].result.periph_result.path_id = 2035 periph->path->bus->path_id; 2036 2037 /* 2038 * The transport layer peripheral doesn't have a target or 2039 * lun. 2040 */ 2041 if (periph->path->target) 2042 cdm->matches[j].result.periph_result.target_id = 2043 periph->path->target->target_id; 2044 else 2045 cdm->matches[j].result.periph_result.target_id = 2046 CAM_TARGET_WILDCARD; 2047 2048 if (periph->path->device) 2049 cdm->matches[j].result.periph_result.target_lun = 2050 periph->path->device->lun_id; 2051 else 2052 cdm->matches[j].result.periph_result.target_lun = 2053 CAM_LUN_WILDCARD; 2054 2055 cdm->matches[j].result.periph_result.unit_number = 2056 periph->unit_number; 2057 strncpy(cdm->matches[j].result.periph_result.periph_name, 2058 periph->periph_name, DEV_IDLEN); 2059 } 2060 2061 return(1); 2062 } 2063 2064 static int 2065 xptperiphlistmatch(struct ccb_dev_match *cdm) 2066 { 2067 int ret; 2068 2069 cdm->num_matches = 0; 2070 2071 /* 2072 * At this point in the edt traversal function, we check the bus 2073 * list generation to make sure that no busses have been added or 2074 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2075 * For the peripheral driver list traversal function, however, we 2076 * don't have to worry about new peripheral driver types coming or 2077 * going; they're in a linker set, and therefore can't change 2078 * without a recompile. 2079 */ 2080 2081 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2082 && (cdm->pos.cookie.pdrv != NULL)) 2083 ret = xptpdrvtraverse( 2084 (struct periph_driver **)cdm->pos.cookie.pdrv, 2085 xptplistpdrvfunc, cdm); 2086 else 2087 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2088 2089 /* 2090 * If we get back 0, that means that we had to stop before fully 2091 * traversing the peripheral driver tree. It also means that one of 2092 * the subroutines has set the status field to the proper value. If 2093 * we get back 1, we've fully traversed the EDT and copied out any 2094 * matching entries. 2095 */ 2096 if (ret == 1) 2097 cdm->status = CAM_DEV_MATCH_LAST; 2098 2099 return(ret); 2100 } 2101 2102 static int 2103 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2104 { 2105 struct cam_eb *bus, *next_bus; 2106 int retval; 2107 2108 retval = 1; 2109 if (start_bus) 2110 bus = start_bus; 2111 else { 2112 xpt_lock_buses(); 2113 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2114 if (bus == NULL) { 2115 xpt_unlock_buses(); 2116 return (retval); 2117 } 2118 bus->refcount++; 2119 xpt_unlock_buses(); 2120 } 2121 for (; bus != NULL; bus = next_bus) { 2122 retval = tr_func(bus, arg); 2123 if (retval == 0) { 2124 xpt_release_bus(bus); 2125 break; 2126 } 2127 xpt_lock_buses(); 2128 next_bus = TAILQ_NEXT(bus, links); 2129 if (next_bus) 2130 next_bus->refcount++; 2131 xpt_unlock_buses(); 2132 xpt_release_bus(bus); 2133 } 2134 return(retval); 2135 } 2136 2137 static int 2138 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2139 xpt_targetfunc_t *tr_func, void *arg) 2140 { 2141 struct cam_et *target, *next_target; 2142 int retval; 2143 2144 retval = 1; 2145 if (start_target) 2146 target = start_target; 2147 else { 2148 mtx_lock(&bus->eb_mtx); 2149 target = TAILQ_FIRST(&bus->et_entries); 2150 if (target == NULL) { 2151 mtx_unlock(&bus->eb_mtx); 2152 return (retval); 2153 } 2154 target->refcount++; 2155 mtx_unlock(&bus->eb_mtx); 2156 } 2157 for (; target != NULL; target = next_target) { 2158 retval = tr_func(target, arg); 2159 if (retval == 0) { 2160 xpt_release_target(target); 2161 break; 2162 } 2163 mtx_lock(&bus->eb_mtx); 2164 next_target = TAILQ_NEXT(target, links); 2165 if (next_target) 2166 next_target->refcount++; 2167 mtx_unlock(&bus->eb_mtx); 2168 xpt_release_target(target); 2169 } 2170 return(retval); 2171 } 2172 2173 static int 2174 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2175 xpt_devicefunc_t *tr_func, void *arg) 2176 { 2177 struct cam_eb *bus; 2178 struct cam_ed *device, *next_device; 2179 int retval; 2180 2181 retval = 1; 2182 bus = target->bus; 2183 if (start_device) 2184 device = start_device; 2185 else { 2186 mtx_lock(&bus->eb_mtx); 2187 device = TAILQ_FIRST(&target->ed_entries); 2188 if (device == NULL) { 2189 mtx_unlock(&bus->eb_mtx); 2190 return (retval); 2191 } 2192 device->refcount++; 2193 mtx_unlock(&bus->eb_mtx); 2194 } 2195 for (; device != NULL; device = next_device) { 2196 mtx_lock(&device->device_mtx); 2197 retval = tr_func(device, arg); 2198 mtx_unlock(&device->device_mtx); 2199 if (retval == 0) { 2200 xpt_release_device(device); 2201 break; 2202 } 2203 mtx_lock(&bus->eb_mtx); 2204 next_device = TAILQ_NEXT(device, links); 2205 if (next_device) 2206 next_device->refcount++; 2207 mtx_unlock(&bus->eb_mtx); 2208 xpt_release_device(device); 2209 } 2210 return(retval); 2211 } 2212 2213 static int 2214 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2215 xpt_periphfunc_t *tr_func, void *arg) 2216 { 2217 struct cam_eb *bus; 2218 struct cam_periph *periph, *next_periph; 2219 int retval; 2220 2221 retval = 1; 2222 2223 bus = device->target->bus; 2224 if (start_periph) 2225 periph = start_periph; 2226 else { 2227 xpt_lock_buses(); 2228 mtx_lock(&bus->eb_mtx); 2229 periph = SLIST_FIRST(&device->periphs); 2230 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2231 periph = SLIST_NEXT(periph, periph_links); 2232 if (periph == NULL) { 2233 mtx_unlock(&bus->eb_mtx); 2234 xpt_unlock_buses(); 2235 return (retval); 2236 } 2237 periph->refcount++; 2238 mtx_unlock(&bus->eb_mtx); 2239 xpt_unlock_buses(); 2240 } 2241 for (; periph != NULL; periph = next_periph) { 2242 retval = tr_func(periph, arg); 2243 if (retval == 0) { 2244 cam_periph_release_locked(periph); 2245 break; 2246 } 2247 xpt_lock_buses(); 2248 mtx_lock(&bus->eb_mtx); 2249 next_periph = SLIST_NEXT(periph, periph_links); 2250 while (next_periph != NULL && 2251 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2252 next_periph = SLIST_NEXT(next_periph, periph_links); 2253 if (next_periph) 2254 next_periph->refcount++; 2255 mtx_unlock(&bus->eb_mtx); 2256 xpt_unlock_buses(); 2257 cam_periph_release_locked(periph); 2258 } 2259 return(retval); 2260 } 2261 2262 static int 2263 xptpdrvtraverse(struct periph_driver **start_pdrv, 2264 xpt_pdrvfunc_t *tr_func, void *arg) 2265 { 2266 struct periph_driver **pdrv; 2267 int retval; 2268 2269 retval = 1; 2270 2271 /* 2272 * We don't traverse the peripheral driver list like we do the 2273 * other lists, because it is a linker set, and therefore cannot be 2274 * changed during runtime. If the peripheral driver list is ever 2275 * re-done to be something other than a linker set (i.e. it can 2276 * change while the system is running), the list traversal should 2277 * be modified to work like the other traversal functions. 2278 */ 2279 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2280 *pdrv != NULL; pdrv++) { 2281 retval = tr_func(pdrv, arg); 2282 2283 if (retval == 0) 2284 return(retval); 2285 } 2286 2287 return(retval); 2288 } 2289 2290 static int 2291 xptpdperiphtraverse(struct periph_driver **pdrv, 2292 struct cam_periph *start_periph, 2293 xpt_periphfunc_t *tr_func, void *arg) 2294 { 2295 struct cam_periph *periph, *next_periph; 2296 int retval; 2297 2298 retval = 1; 2299 2300 if (start_periph) 2301 periph = start_periph; 2302 else { 2303 xpt_lock_buses(); 2304 periph = TAILQ_FIRST(&(*pdrv)->units); 2305 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2306 periph = TAILQ_NEXT(periph, unit_links); 2307 if (periph == NULL) { 2308 xpt_unlock_buses(); 2309 return (retval); 2310 } 2311 periph->refcount++; 2312 xpt_unlock_buses(); 2313 } 2314 for (; periph != NULL; periph = next_periph) { 2315 cam_periph_lock(periph); 2316 retval = tr_func(periph, arg); 2317 cam_periph_unlock(periph); 2318 if (retval == 0) { 2319 cam_periph_release(periph); 2320 break; 2321 } 2322 xpt_lock_buses(); 2323 next_periph = TAILQ_NEXT(periph, unit_links); 2324 while (next_periph != NULL && 2325 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2326 next_periph = TAILQ_NEXT(next_periph, unit_links); 2327 if (next_periph) 2328 next_periph->refcount++; 2329 xpt_unlock_buses(); 2330 cam_periph_release(periph); 2331 } 2332 return(retval); 2333 } 2334 2335 static int 2336 xptdefbusfunc(struct cam_eb *bus, void *arg) 2337 { 2338 struct xpt_traverse_config *tr_config; 2339 2340 tr_config = (struct xpt_traverse_config *)arg; 2341 2342 if (tr_config->depth == XPT_DEPTH_BUS) { 2343 xpt_busfunc_t *tr_func; 2344 2345 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2346 2347 return(tr_func(bus, tr_config->tr_arg)); 2348 } else 2349 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2350 } 2351 2352 static int 2353 xptdeftargetfunc(struct cam_et *target, void *arg) 2354 { 2355 struct xpt_traverse_config *tr_config; 2356 2357 tr_config = (struct xpt_traverse_config *)arg; 2358 2359 if (tr_config->depth == XPT_DEPTH_TARGET) { 2360 xpt_targetfunc_t *tr_func; 2361 2362 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2363 2364 return(tr_func(target, tr_config->tr_arg)); 2365 } else 2366 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2367 } 2368 2369 static int 2370 xptdefdevicefunc(struct cam_ed *device, void *arg) 2371 { 2372 struct xpt_traverse_config *tr_config; 2373 2374 tr_config = (struct xpt_traverse_config *)arg; 2375 2376 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2377 xpt_devicefunc_t *tr_func; 2378 2379 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2380 2381 return(tr_func(device, tr_config->tr_arg)); 2382 } else 2383 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2384 } 2385 2386 static int 2387 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2388 { 2389 struct xpt_traverse_config *tr_config; 2390 xpt_periphfunc_t *tr_func; 2391 2392 tr_config = (struct xpt_traverse_config *)arg; 2393 2394 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2395 2396 /* 2397 * Unlike the other default functions, we don't check for depth 2398 * here. The peripheral driver level is the last level in the EDT, 2399 * so if we're here, we should execute the function in question. 2400 */ 2401 return(tr_func(periph, tr_config->tr_arg)); 2402 } 2403 2404 /* 2405 * Execute the given function for every bus in the EDT. 2406 */ 2407 static int 2408 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2409 { 2410 struct xpt_traverse_config tr_config; 2411 2412 tr_config.depth = XPT_DEPTH_BUS; 2413 tr_config.tr_func = tr_func; 2414 tr_config.tr_arg = arg; 2415 2416 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2417 } 2418 2419 /* 2420 * Execute the given function for every device in the EDT. 2421 */ 2422 static int 2423 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2424 { 2425 struct xpt_traverse_config tr_config; 2426 2427 tr_config.depth = XPT_DEPTH_DEVICE; 2428 tr_config.tr_func = tr_func; 2429 tr_config.tr_arg = arg; 2430 2431 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2432 } 2433 2434 static int 2435 xptsetasyncfunc(struct cam_ed *device, void *arg) 2436 { 2437 struct cam_path path; 2438 struct ccb_getdev cgd; 2439 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2440 2441 /* 2442 * Don't report unconfigured devices (Wildcard devs, 2443 * devices only for target mode, device instances 2444 * that have been invalidated but are waiting for 2445 * their last reference count to be released). 2446 */ 2447 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2448 return (1); 2449 2450 xpt_compile_path(&path, 2451 NULL, 2452 device->target->bus->path_id, 2453 device->target->target_id, 2454 device->lun_id); 2455 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2456 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2457 xpt_action((union ccb *)&cgd); 2458 csa->callback(csa->callback_arg, 2459 AC_FOUND_DEVICE, 2460 &path, &cgd); 2461 xpt_release_path(&path); 2462 2463 return(1); 2464 } 2465 2466 static int 2467 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2468 { 2469 struct cam_path path; 2470 struct ccb_pathinq cpi; 2471 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2472 2473 xpt_compile_path(&path, /*periph*/NULL, 2474 bus->path_id, 2475 CAM_TARGET_WILDCARD, 2476 CAM_LUN_WILDCARD); 2477 xpt_path_lock(&path); 2478 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2479 cpi.ccb_h.func_code = XPT_PATH_INQ; 2480 xpt_action((union ccb *)&cpi); 2481 csa->callback(csa->callback_arg, 2482 AC_PATH_REGISTERED, 2483 &path, &cpi); 2484 xpt_path_unlock(&path); 2485 xpt_release_path(&path); 2486 2487 return(1); 2488 } 2489 2490 void 2491 xpt_action(union ccb *start_ccb) 2492 { 2493 2494 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2495 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2496 xpt_action_name(start_ccb->ccb_h.func_code))); 2497 2498 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2499 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2500 } 2501 2502 void 2503 xpt_action_default(union ccb *start_ccb) 2504 { 2505 struct cam_path *path; 2506 struct cam_sim *sim; 2507 int lock; 2508 2509 path = start_ccb->ccb_h.path; 2510 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2511 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2512 xpt_action_name(start_ccb->ccb_h.func_code))); 2513 2514 switch (start_ccb->ccb_h.func_code) { 2515 case XPT_SCSI_IO: 2516 { 2517 struct cam_ed *device; 2518 2519 /* 2520 * For the sake of compatibility with SCSI-1 2521 * devices that may not understand the identify 2522 * message, we include lun information in the 2523 * second byte of all commands. SCSI-1 specifies 2524 * that luns are a 3 bit value and reserves only 3 2525 * bits for lun information in the CDB. Later 2526 * revisions of the SCSI spec allow for more than 8 2527 * luns, but have deprecated lun information in the 2528 * CDB. So, if the lun won't fit, we must omit. 2529 * 2530 * Also be aware that during initial probing for devices, 2531 * the inquiry information is unknown but initialized to 0. 2532 * This means that this code will be exercised while probing 2533 * devices with an ANSI revision greater than 2. 2534 */ 2535 device = path->device; 2536 if (device->protocol_version <= SCSI_REV_2 2537 && start_ccb->ccb_h.target_lun < 8 2538 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2539 2540 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2541 start_ccb->ccb_h.target_lun << 5; 2542 } 2543 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2544 } 2545 /* FALLTHROUGH */ 2546 case XPT_TARGET_IO: 2547 case XPT_CONT_TARGET_IO: 2548 start_ccb->csio.sense_resid = 0; 2549 start_ccb->csio.resid = 0; 2550 /* FALLTHROUGH */ 2551 case XPT_ATA_IO: 2552 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2553 start_ccb->ataio.resid = 0; 2554 /* FALLTHROUGH */ 2555 case XPT_NVME_IO: 2556 if (start_ccb->ccb_h.func_code == XPT_NVME_IO) 2557 start_ccb->nvmeio.resid = 0; 2558 /* FALLTHROUGH */ 2559 case XPT_RESET_DEV: 2560 case XPT_ENG_EXEC: 2561 case XPT_SMP_IO: 2562 { 2563 struct cam_devq *devq; 2564 2565 devq = path->bus->sim->devq; 2566 mtx_lock(&devq->send_mtx); 2567 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2568 if (xpt_schedule_devq(devq, path->device) != 0) 2569 xpt_run_devq(devq); 2570 mtx_unlock(&devq->send_mtx); 2571 break; 2572 } 2573 case XPT_CALC_GEOMETRY: 2574 /* Filter out garbage */ 2575 if (start_ccb->ccg.block_size == 0 2576 || start_ccb->ccg.volume_size == 0) { 2577 start_ccb->ccg.cylinders = 0; 2578 start_ccb->ccg.heads = 0; 2579 start_ccb->ccg.secs_per_track = 0; 2580 start_ccb->ccb_h.status = CAM_REQ_CMP; 2581 break; 2582 } 2583 #if defined(PC98) || defined(__sparc64__) 2584 /* 2585 * In a PC-98 system, geometry translation depens on 2586 * the "real" device geometry obtained from mode page 4. 2587 * SCSI geometry translation is performed in the 2588 * initialization routine of the SCSI BIOS and the result 2589 * stored in host memory. If the translation is available 2590 * in host memory, use it. If not, rely on the default 2591 * translation the device driver performs. 2592 * For sparc64, we may need adjust the geometry of large 2593 * disks in order to fit the limitations of the 16-bit 2594 * fields of the VTOC8 disk label. 2595 */ 2596 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2597 start_ccb->ccb_h.status = CAM_REQ_CMP; 2598 break; 2599 } 2600 #endif 2601 goto call_sim; 2602 case XPT_ABORT: 2603 { 2604 union ccb* abort_ccb; 2605 2606 abort_ccb = start_ccb->cab.abort_ccb; 2607 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2608 struct cam_ed *device; 2609 struct cam_devq *devq; 2610 2611 device = abort_ccb->ccb_h.path->device; 2612 devq = device->sim->devq; 2613 2614 mtx_lock(&devq->send_mtx); 2615 if (abort_ccb->ccb_h.pinfo.index > 0) { 2616 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2617 abort_ccb->ccb_h.status = 2618 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2619 xpt_freeze_devq_device(device, 1); 2620 mtx_unlock(&devq->send_mtx); 2621 xpt_done(abort_ccb); 2622 start_ccb->ccb_h.status = CAM_REQ_CMP; 2623 break; 2624 } 2625 mtx_unlock(&devq->send_mtx); 2626 2627 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2628 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2629 /* 2630 * We've caught this ccb en route to 2631 * the SIM. Flag it for abort and the 2632 * SIM will do so just before starting 2633 * real work on the CCB. 2634 */ 2635 abort_ccb->ccb_h.status = 2636 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2637 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2638 start_ccb->ccb_h.status = CAM_REQ_CMP; 2639 break; 2640 } 2641 } 2642 if (XPT_FC_IS_QUEUED(abort_ccb) 2643 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2644 /* 2645 * It's already completed but waiting 2646 * for our SWI to get to it. 2647 */ 2648 start_ccb->ccb_h.status = CAM_UA_ABORT; 2649 break; 2650 } 2651 /* 2652 * If we weren't able to take care of the abort request 2653 * in the XPT, pass the request down to the SIM for processing. 2654 */ 2655 } 2656 /* FALLTHROUGH */ 2657 case XPT_ACCEPT_TARGET_IO: 2658 case XPT_EN_LUN: 2659 case XPT_IMMED_NOTIFY: 2660 case XPT_NOTIFY_ACK: 2661 case XPT_RESET_BUS: 2662 case XPT_IMMEDIATE_NOTIFY: 2663 case XPT_NOTIFY_ACKNOWLEDGE: 2664 case XPT_GET_SIM_KNOB_OLD: 2665 case XPT_GET_SIM_KNOB: 2666 case XPT_SET_SIM_KNOB: 2667 case XPT_GET_TRAN_SETTINGS: 2668 case XPT_SET_TRAN_SETTINGS: 2669 case XPT_PATH_INQ: 2670 call_sim: 2671 sim = path->bus->sim; 2672 lock = (mtx_owned(sim->mtx) == 0); 2673 if (lock) 2674 CAM_SIM_LOCK(sim); 2675 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2676 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2677 (*(sim->sim_action))(sim, start_ccb); 2678 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2679 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2680 if (lock) 2681 CAM_SIM_UNLOCK(sim); 2682 break; 2683 case XPT_PATH_STATS: 2684 start_ccb->cpis.last_reset = path->bus->last_reset; 2685 start_ccb->ccb_h.status = CAM_REQ_CMP; 2686 break; 2687 case XPT_GDEV_TYPE: 2688 { 2689 struct cam_ed *dev; 2690 2691 dev = path->device; 2692 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2693 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2694 } else { 2695 struct ccb_getdev *cgd; 2696 2697 cgd = &start_ccb->cgd; 2698 cgd->protocol = dev->protocol; 2699 cgd->inq_data = dev->inq_data; 2700 cgd->ident_data = dev->ident_data; 2701 cgd->inq_flags = dev->inq_flags; 2702 cgd->nvme_data = dev->nvme_data; 2703 cgd->nvme_cdata = dev->nvme_cdata; 2704 cgd->ccb_h.status = CAM_REQ_CMP; 2705 cgd->serial_num_len = dev->serial_num_len; 2706 if ((dev->serial_num_len > 0) 2707 && (dev->serial_num != NULL)) 2708 bcopy(dev->serial_num, cgd->serial_num, 2709 dev->serial_num_len); 2710 } 2711 break; 2712 } 2713 case XPT_GDEV_STATS: 2714 { 2715 struct cam_ed *dev; 2716 2717 dev = path->device; 2718 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2719 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2720 } else { 2721 struct ccb_getdevstats *cgds; 2722 struct cam_eb *bus; 2723 struct cam_et *tar; 2724 struct cam_devq *devq; 2725 2726 cgds = &start_ccb->cgds; 2727 bus = path->bus; 2728 tar = path->target; 2729 devq = bus->sim->devq; 2730 mtx_lock(&devq->send_mtx); 2731 cgds->dev_openings = dev->ccbq.dev_openings; 2732 cgds->dev_active = dev->ccbq.dev_active; 2733 cgds->allocated = dev->ccbq.allocated; 2734 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2735 cgds->held = cgds->allocated - cgds->dev_active - 2736 cgds->queued; 2737 cgds->last_reset = tar->last_reset; 2738 cgds->maxtags = dev->maxtags; 2739 cgds->mintags = dev->mintags; 2740 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2741 cgds->last_reset = bus->last_reset; 2742 mtx_unlock(&devq->send_mtx); 2743 cgds->ccb_h.status = CAM_REQ_CMP; 2744 } 2745 break; 2746 } 2747 case XPT_GDEVLIST: 2748 { 2749 struct cam_periph *nperiph; 2750 struct periph_list *periph_head; 2751 struct ccb_getdevlist *cgdl; 2752 u_int i; 2753 struct cam_ed *device; 2754 int found; 2755 2756 2757 found = 0; 2758 2759 /* 2760 * Don't want anyone mucking with our data. 2761 */ 2762 device = path->device; 2763 periph_head = &device->periphs; 2764 cgdl = &start_ccb->cgdl; 2765 2766 /* 2767 * Check and see if the list has changed since the user 2768 * last requested a list member. If so, tell them that the 2769 * list has changed, and therefore they need to start over 2770 * from the beginning. 2771 */ 2772 if ((cgdl->index != 0) && 2773 (cgdl->generation != device->generation)) { 2774 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2775 break; 2776 } 2777 2778 /* 2779 * Traverse the list of peripherals and attempt to find 2780 * the requested peripheral. 2781 */ 2782 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2783 (nperiph != NULL) && (i <= cgdl->index); 2784 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2785 if (i == cgdl->index) { 2786 strncpy(cgdl->periph_name, 2787 nperiph->periph_name, 2788 DEV_IDLEN); 2789 cgdl->unit_number = nperiph->unit_number; 2790 found = 1; 2791 } 2792 } 2793 if (found == 0) { 2794 cgdl->status = CAM_GDEVLIST_ERROR; 2795 break; 2796 } 2797 2798 if (nperiph == NULL) 2799 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2800 else 2801 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2802 2803 cgdl->index++; 2804 cgdl->generation = device->generation; 2805 2806 cgdl->ccb_h.status = CAM_REQ_CMP; 2807 break; 2808 } 2809 case XPT_DEV_MATCH: 2810 { 2811 dev_pos_type position_type; 2812 struct ccb_dev_match *cdm; 2813 2814 cdm = &start_ccb->cdm; 2815 2816 /* 2817 * There are two ways of getting at information in the EDT. 2818 * The first way is via the primary EDT tree. It starts 2819 * with a list of busses, then a list of targets on a bus, 2820 * then devices/luns on a target, and then peripherals on a 2821 * device/lun. The "other" way is by the peripheral driver 2822 * lists. The peripheral driver lists are organized by 2823 * peripheral driver. (obviously) So it makes sense to 2824 * use the peripheral driver list if the user is looking 2825 * for something like "da1", or all "da" devices. If the 2826 * user is looking for something on a particular bus/target 2827 * or lun, it's generally better to go through the EDT tree. 2828 */ 2829 2830 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2831 position_type = cdm->pos.position_type; 2832 else { 2833 u_int i; 2834 2835 position_type = CAM_DEV_POS_NONE; 2836 2837 for (i = 0; i < cdm->num_patterns; i++) { 2838 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2839 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2840 position_type = CAM_DEV_POS_EDT; 2841 break; 2842 } 2843 } 2844 2845 if (cdm->num_patterns == 0) 2846 position_type = CAM_DEV_POS_EDT; 2847 else if (position_type == CAM_DEV_POS_NONE) 2848 position_type = CAM_DEV_POS_PDRV; 2849 } 2850 2851 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2852 case CAM_DEV_POS_EDT: 2853 xptedtmatch(cdm); 2854 break; 2855 case CAM_DEV_POS_PDRV: 2856 xptperiphlistmatch(cdm); 2857 break; 2858 default: 2859 cdm->status = CAM_DEV_MATCH_ERROR; 2860 break; 2861 } 2862 2863 if (cdm->status == CAM_DEV_MATCH_ERROR) 2864 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2865 else 2866 start_ccb->ccb_h.status = CAM_REQ_CMP; 2867 2868 break; 2869 } 2870 case XPT_SASYNC_CB: 2871 { 2872 struct ccb_setasync *csa; 2873 struct async_node *cur_entry; 2874 struct async_list *async_head; 2875 u_int32_t added; 2876 2877 csa = &start_ccb->csa; 2878 added = csa->event_enable; 2879 async_head = &path->device->asyncs; 2880 2881 /* 2882 * If there is already an entry for us, simply 2883 * update it. 2884 */ 2885 cur_entry = SLIST_FIRST(async_head); 2886 while (cur_entry != NULL) { 2887 if ((cur_entry->callback_arg == csa->callback_arg) 2888 && (cur_entry->callback == csa->callback)) 2889 break; 2890 cur_entry = SLIST_NEXT(cur_entry, links); 2891 } 2892 2893 if (cur_entry != NULL) { 2894 /* 2895 * If the request has no flags set, 2896 * remove the entry. 2897 */ 2898 added &= ~cur_entry->event_enable; 2899 if (csa->event_enable == 0) { 2900 SLIST_REMOVE(async_head, cur_entry, 2901 async_node, links); 2902 xpt_release_device(path->device); 2903 free(cur_entry, M_CAMXPT); 2904 } else { 2905 cur_entry->event_enable = csa->event_enable; 2906 } 2907 csa->event_enable = added; 2908 } else { 2909 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2910 M_NOWAIT); 2911 if (cur_entry == NULL) { 2912 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2913 break; 2914 } 2915 cur_entry->event_enable = csa->event_enable; 2916 cur_entry->event_lock = 2917 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2918 cur_entry->callback_arg = csa->callback_arg; 2919 cur_entry->callback = csa->callback; 2920 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2921 xpt_acquire_device(path->device); 2922 } 2923 start_ccb->ccb_h.status = CAM_REQ_CMP; 2924 break; 2925 } 2926 case XPT_REL_SIMQ: 2927 { 2928 struct ccb_relsim *crs; 2929 struct cam_ed *dev; 2930 2931 crs = &start_ccb->crs; 2932 dev = path->device; 2933 if (dev == NULL) { 2934 2935 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2936 break; 2937 } 2938 2939 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2940 2941 /* Don't ever go below one opening */ 2942 if (crs->openings > 0) { 2943 xpt_dev_ccbq_resize(path, crs->openings); 2944 if (bootverbose) { 2945 xpt_print(path, 2946 "number of openings is now %d\n", 2947 crs->openings); 2948 } 2949 } 2950 } 2951 2952 mtx_lock(&dev->sim->devq->send_mtx); 2953 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2954 2955 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2956 2957 /* 2958 * Just extend the old timeout and decrement 2959 * the freeze count so that a single timeout 2960 * is sufficient for releasing the queue. 2961 */ 2962 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2963 callout_stop(&dev->callout); 2964 } else { 2965 2966 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2967 } 2968 2969 callout_reset_sbt(&dev->callout, 2970 SBT_1MS * crs->release_timeout, 0, 2971 xpt_release_devq_timeout, dev, 0); 2972 2973 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2974 2975 } 2976 2977 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2978 2979 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2980 /* 2981 * Decrement the freeze count so that a single 2982 * completion is still sufficient to unfreeze 2983 * the queue. 2984 */ 2985 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2986 } else { 2987 2988 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2989 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2990 } 2991 } 2992 2993 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2994 2995 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2996 || (dev->ccbq.dev_active == 0)) { 2997 2998 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2999 } else { 3000 3001 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3002 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3003 } 3004 } 3005 mtx_unlock(&dev->sim->devq->send_mtx); 3006 3007 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 3008 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 3009 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 3010 start_ccb->ccb_h.status = CAM_REQ_CMP; 3011 break; 3012 } 3013 case XPT_DEBUG: { 3014 struct cam_path *oldpath; 3015 3016 /* Check that all request bits are supported. */ 3017 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3018 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3019 break; 3020 } 3021 3022 cam_dflags = CAM_DEBUG_NONE; 3023 if (cam_dpath != NULL) { 3024 oldpath = cam_dpath; 3025 cam_dpath = NULL; 3026 xpt_free_path(oldpath); 3027 } 3028 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3029 if (xpt_create_path(&cam_dpath, NULL, 3030 start_ccb->ccb_h.path_id, 3031 start_ccb->ccb_h.target_id, 3032 start_ccb->ccb_h.target_lun) != 3033 CAM_REQ_CMP) { 3034 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3035 } else { 3036 cam_dflags = start_ccb->cdbg.flags; 3037 start_ccb->ccb_h.status = CAM_REQ_CMP; 3038 xpt_print(cam_dpath, "debugging flags now %x\n", 3039 cam_dflags); 3040 } 3041 } else 3042 start_ccb->ccb_h.status = CAM_REQ_CMP; 3043 break; 3044 } 3045 case XPT_NOOP: 3046 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3047 xpt_freeze_devq(path, 1); 3048 start_ccb->ccb_h.status = CAM_REQ_CMP; 3049 break; 3050 case XPT_REPROBE_LUN: 3051 xpt_async(AC_INQ_CHANGED, path, NULL); 3052 start_ccb->ccb_h.status = CAM_REQ_CMP; 3053 xpt_done(start_ccb); 3054 break; 3055 default: 3056 case XPT_SDEV_TYPE: 3057 case XPT_TERM_IO: 3058 case XPT_ENG_INQ: 3059 /* XXX Implement */ 3060 xpt_print_path(start_ccb->ccb_h.path); 3061 printf("%s: CCB type %#x %s not supported\n", __func__, 3062 start_ccb->ccb_h.func_code, 3063 xpt_action_name(start_ccb->ccb_h.func_code)); 3064 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3065 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3066 xpt_done(start_ccb); 3067 } 3068 break; 3069 } 3070 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3071 ("xpt_action_default: func= %#x %s status %#x\n", 3072 start_ccb->ccb_h.func_code, 3073 xpt_action_name(start_ccb->ccb_h.func_code), 3074 start_ccb->ccb_h.status)); 3075 } 3076 3077 void 3078 xpt_polled_action(union ccb *start_ccb) 3079 { 3080 u_int32_t timeout; 3081 struct cam_sim *sim; 3082 struct cam_devq *devq; 3083 struct cam_ed *dev; 3084 3085 timeout = start_ccb->ccb_h.timeout * 10; 3086 sim = start_ccb->ccb_h.path->bus->sim; 3087 devq = sim->devq; 3088 dev = start_ccb->ccb_h.path->device; 3089 3090 mtx_unlock(&dev->device_mtx); 3091 3092 /* 3093 * Steal an opening so that no other queued requests 3094 * can get it before us while we simulate interrupts. 3095 */ 3096 mtx_lock(&devq->send_mtx); 3097 dev->ccbq.dev_openings--; 3098 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3099 (--timeout > 0)) { 3100 mtx_unlock(&devq->send_mtx); 3101 DELAY(100); 3102 CAM_SIM_LOCK(sim); 3103 (*(sim->sim_poll))(sim); 3104 CAM_SIM_UNLOCK(sim); 3105 camisr_runqueue(); 3106 mtx_lock(&devq->send_mtx); 3107 } 3108 dev->ccbq.dev_openings++; 3109 mtx_unlock(&devq->send_mtx); 3110 3111 if (timeout != 0) { 3112 xpt_action(start_ccb); 3113 while(--timeout > 0) { 3114 CAM_SIM_LOCK(sim); 3115 (*(sim->sim_poll))(sim); 3116 CAM_SIM_UNLOCK(sim); 3117 camisr_runqueue(); 3118 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3119 != CAM_REQ_INPROG) 3120 break; 3121 DELAY(100); 3122 } 3123 if (timeout == 0) { 3124 /* 3125 * XXX Is it worth adding a sim_timeout entry 3126 * point so we can attempt recovery? If 3127 * this is only used for dumps, I don't think 3128 * it is. 3129 */ 3130 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3131 } 3132 } else { 3133 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3134 } 3135 3136 mtx_lock(&dev->device_mtx); 3137 } 3138 3139 /* 3140 * Schedule a peripheral driver to receive a ccb when its 3141 * target device has space for more transactions. 3142 */ 3143 void 3144 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3145 { 3146 3147 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3148 cam_periph_assert(periph, MA_OWNED); 3149 if (new_priority < periph->scheduled_priority) { 3150 periph->scheduled_priority = new_priority; 3151 xpt_run_allocq(periph, 0); 3152 } 3153 } 3154 3155 3156 /* 3157 * Schedule a device to run on a given queue. 3158 * If the device was inserted as a new entry on the queue, 3159 * return 1 meaning the device queue should be run. If we 3160 * were already queued, implying someone else has already 3161 * started the queue, return 0 so the caller doesn't attempt 3162 * to run the queue. 3163 */ 3164 static int 3165 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3166 u_int32_t new_priority) 3167 { 3168 int retval; 3169 u_int32_t old_priority; 3170 3171 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3172 3173 old_priority = pinfo->priority; 3174 3175 /* 3176 * Are we already queued? 3177 */ 3178 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3179 /* Simply reorder based on new priority */ 3180 if (new_priority < old_priority) { 3181 camq_change_priority(queue, pinfo->index, 3182 new_priority); 3183 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3184 ("changed priority to %d\n", 3185 new_priority)); 3186 retval = 1; 3187 } else 3188 retval = 0; 3189 } else { 3190 /* New entry on the queue */ 3191 if (new_priority < old_priority) 3192 pinfo->priority = new_priority; 3193 3194 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3195 ("Inserting onto queue\n")); 3196 pinfo->generation = ++queue->generation; 3197 camq_insert(queue, pinfo); 3198 retval = 1; 3199 } 3200 return (retval); 3201 } 3202 3203 static void 3204 xpt_run_allocq_task(void *context, int pending) 3205 { 3206 struct cam_periph *periph = context; 3207 3208 cam_periph_lock(periph); 3209 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3210 xpt_run_allocq(periph, 1); 3211 cam_periph_unlock(periph); 3212 cam_periph_release(periph); 3213 } 3214 3215 static void 3216 xpt_run_allocq(struct cam_periph *periph, int sleep) 3217 { 3218 struct cam_ed *device; 3219 union ccb *ccb; 3220 uint32_t prio; 3221 3222 cam_periph_assert(periph, MA_OWNED); 3223 if (periph->periph_allocating) 3224 return; 3225 periph->periph_allocating = 1; 3226 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3227 device = periph->path->device; 3228 ccb = NULL; 3229 restart: 3230 while ((prio = min(periph->scheduled_priority, 3231 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3232 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3233 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3234 3235 if (ccb == NULL && 3236 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3237 if (sleep) { 3238 ccb = xpt_get_ccb(periph); 3239 goto restart; 3240 } 3241 if (periph->flags & CAM_PERIPH_RUN_TASK) 3242 break; 3243 cam_periph_doacquire(periph); 3244 periph->flags |= CAM_PERIPH_RUN_TASK; 3245 taskqueue_enqueue(xsoftc.xpt_taskq, 3246 &periph->periph_run_task); 3247 break; 3248 } 3249 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3250 if (prio == periph->immediate_priority) { 3251 periph->immediate_priority = CAM_PRIORITY_NONE; 3252 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3253 ("waking cam_periph_getccb()\n")); 3254 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3255 periph_links.sle); 3256 wakeup(&periph->ccb_list); 3257 } else { 3258 periph->scheduled_priority = CAM_PRIORITY_NONE; 3259 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3260 ("calling periph_start()\n")); 3261 periph->periph_start(periph, ccb); 3262 } 3263 ccb = NULL; 3264 } 3265 if (ccb != NULL) 3266 xpt_release_ccb(ccb); 3267 periph->periph_allocating = 0; 3268 } 3269 3270 static void 3271 xpt_run_devq(struct cam_devq *devq) 3272 { 3273 int lock; 3274 3275 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3276 3277 devq->send_queue.qfrozen_cnt++; 3278 while ((devq->send_queue.entries > 0) 3279 && (devq->send_openings > 0) 3280 && (devq->send_queue.qfrozen_cnt <= 1)) { 3281 struct cam_ed *device; 3282 union ccb *work_ccb; 3283 struct cam_sim *sim; 3284 struct xpt_proto *proto; 3285 3286 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3287 CAMQ_HEAD); 3288 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3289 ("running device %p\n", device)); 3290 3291 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3292 if (work_ccb == NULL) { 3293 printf("device on run queue with no ccbs???\n"); 3294 continue; 3295 } 3296 3297 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3298 3299 mtx_lock(&xsoftc.xpt_highpower_lock); 3300 if (xsoftc.num_highpower <= 0) { 3301 /* 3302 * We got a high power command, but we 3303 * don't have any available slots. Freeze 3304 * the device queue until we have a slot 3305 * available. 3306 */ 3307 xpt_freeze_devq_device(device, 1); 3308 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3309 highpowerq_entry); 3310 3311 mtx_unlock(&xsoftc.xpt_highpower_lock); 3312 continue; 3313 } else { 3314 /* 3315 * Consume a high power slot while 3316 * this ccb runs. 3317 */ 3318 xsoftc.num_highpower--; 3319 } 3320 mtx_unlock(&xsoftc.xpt_highpower_lock); 3321 } 3322 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3323 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3324 devq->send_openings--; 3325 devq->send_active++; 3326 xpt_schedule_devq(devq, device); 3327 mtx_unlock(&devq->send_mtx); 3328 3329 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3330 /* 3331 * The client wants to freeze the queue 3332 * after this CCB is sent. 3333 */ 3334 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3335 } 3336 3337 /* In Target mode, the peripheral driver knows best... */ 3338 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3339 if ((device->inq_flags & SID_CmdQue) != 0 3340 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3341 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3342 else 3343 /* 3344 * Clear this in case of a retried CCB that 3345 * failed due to a rejected tag. 3346 */ 3347 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3348 } 3349 3350 KASSERT(device == work_ccb->ccb_h.path->device, 3351 ("device (%p) / path->device (%p) mismatch", 3352 device, work_ccb->ccb_h.path->device)); 3353 proto = xpt_proto_find(device->protocol); 3354 if (proto && proto->ops->debug_out) 3355 proto->ops->debug_out(work_ccb); 3356 3357 /* 3358 * Device queues can be shared among multiple SIM instances 3359 * that reside on different busses. Use the SIM from the 3360 * queued device, rather than the one from the calling bus. 3361 */ 3362 sim = device->sim; 3363 lock = (mtx_owned(sim->mtx) == 0); 3364 if (lock) 3365 CAM_SIM_LOCK(sim); 3366 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // xxx uintprt_t too small 32bit platforms 3367 (*(sim->sim_action))(sim, work_ccb); 3368 if (lock) 3369 CAM_SIM_UNLOCK(sim); 3370 mtx_lock(&devq->send_mtx); 3371 } 3372 devq->send_queue.qfrozen_cnt--; 3373 } 3374 3375 /* 3376 * This function merges stuff from the slave ccb into the master ccb, while 3377 * keeping important fields in the master ccb constant. 3378 */ 3379 void 3380 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3381 { 3382 3383 /* 3384 * Pull fields that are valid for peripheral drivers to set 3385 * into the master CCB along with the CCB "payload". 3386 */ 3387 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3388 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3389 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3390 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3391 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3392 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3393 } 3394 3395 void 3396 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3397 u_int32_t priority, u_int32_t flags) 3398 { 3399 3400 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3401 ccb_h->pinfo.priority = priority; 3402 ccb_h->path = path; 3403 ccb_h->path_id = path->bus->path_id; 3404 if (path->target) 3405 ccb_h->target_id = path->target->target_id; 3406 else 3407 ccb_h->target_id = CAM_TARGET_WILDCARD; 3408 if (path->device) { 3409 ccb_h->target_lun = path->device->lun_id; 3410 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3411 } else { 3412 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3413 } 3414 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3415 ccb_h->flags = flags; 3416 ccb_h->xflags = 0; 3417 } 3418 3419 void 3420 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3421 { 3422 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3423 } 3424 3425 /* Path manipulation functions */ 3426 cam_status 3427 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3428 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3429 { 3430 struct cam_path *path; 3431 cam_status status; 3432 3433 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3434 3435 if (path == NULL) { 3436 status = CAM_RESRC_UNAVAIL; 3437 return(status); 3438 } 3439 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3440 if (status != CAM_REQ_CMP) { 3441 free(path, M_CAMPATH); 3442 path = NULL; 3443 } 3444 *new_path_ptr = path; 3445 return (status); 3446 } 3447 3448 cam_status 3449 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3450 struct cam_periph *periph, path_id_t path_id, 3451 target_id_t target_id, lun_id_t lun_id) 3452 { 3453 3454 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3455 lun_id)); 3456 } 3457 3458 cam_status 3459 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3460 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3461 { 3462 struct cam_eb *bus; 3463 struct cam_et *target; 3464 struct cam_ed *device; 3465 cam_status status; 3466 3467 status = CAM_REQ_CMP; /* Completed without error */ 3468 target = NULL; /* Wildcarded */ 3469 device = NULL; /* Wildcarded */ 3470 3471 /* 3472 * We will potentially modify the EDT, so block interrupts 3473 * that may attempt to create cam paths. 3474 */ 3475 bus = xpt_find_bus(path_id); 3476 if (bus == NULL) { 3477 status = CAM_PATH_INVALID; 3478 } else { 3479 xpt_lock_buses(); 3480 mtx_lock(&bus->eb_mtx); 3481 target = xpt_find_target(bus, target_id); 3482 if (target == NULL) { 3483 /* Create one */ 3484 struct cam_et *new_target; 3485 3486 new_target = xpt_alloc_target(bus, target_id); 3487 if (new_target == NULL) { 3488 status = CAM_RESRC_UNAVAIL; 3489 } else { 3490 target = new_target; 3491 } 3492 } 3493 xpt_unlock_buses(); 3494 if (target != NULL) { 3495 device = xpt_find_device(target, lun_id); 3496 if (device == NULL) { 3497 /* Create one */ 3498 struct cam_ed *new_device; 3499 3500 new_device = 3501 (*(bus->xport->ops->alloc_device))(bus, 3502 target, 3503 lun_id); 3504 if (new_device == NULL) { 3505 status = CAM_RESRC_UNAVAIL; 3506 } else { 3507 device = new_device; 3508 } 3509 } 3510 } 3511 mtx_unlock(&bus->eb_mtx); 3512 } 3513 3514 /* 3515 * Only touch the user's data if we are successful. 3516 */ 3517 if (status == CAM_REQ_CMP) { 3518 new_path->periph = perph; 3519 new_path->bus = bus; 3520 new_path->target = target; 3521 new_path->device = device; 3522 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3523 } else { 3524 if (device != NULL) 3525 xpt_release_device(device); 3526 if (target != NULL) 3527 xpt_release_target(target); 3528 if (bus != NULL) 3529 xpt_release_bus(bus); 3530 } 3531 return (status); 3532 } 3533 3534 cam_status 3535 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3536 { 3537 struct cam_path *new_path; 3538 3539 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3540 if (new_path == NULL) 3541 return(CAM_RESRC_UNAVAIL); 3542 xpt_copy_path(new_path, path); 3543 *new_path_ptr = new_path; 3544 return (CAM_REQ_CMP); 3545 } 3546 3547 void 3548 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3549 { 3550 3551 *new_path = *path; 3552 if (path->bus != NULL) 3553 xpt_acquire_bus(path->bus); 3554 if (path->target != NULL) 3555 xpt_acquire_target(path->target); 3556 if (path->device != NULL) 3557 xpt_acquire_device(path->device); 3558 } 3559 3560 void 3561 xpt_release_path(struct cam_path *path) 3562 { 3563 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3564 if (path->device != NULL) { 3565 xpt_release_device(path->device); 3566 path->device = NULL; 3567 } 3568 if (path->target != NULL) { 3569 xpt_release_target(path->target); 3570 path->target = NULL; 3571 } 3572 if (path->bus != NULL) { 3573 xpt_release_bus(path->bus); 3574 path->bus = NULL; 3575 } 3576 } 3577 3578 void 3579 xpt_free_path(struct cam_path *path) 3580 { 3581 3582 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3583 xpt_release_path(path); 3584 free(path, M_CAMPATH); 3585 } 3586 3587 void 3588 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3589 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3590 { 3591 3592 xpt_lock_buses(); 3593 if (bus_ref) { 3594 if (path->bus) 3595 *bus_ref = path->bus->refcount; 3596 else 3597 *bus_ref = 0; 3598 } 3599 if (periph_ref) { 3600 if (path->periph) 3601 *periph_ref = path->periph->refcount; 3602 else 3603 *periph_ref = 0; 3604 } 3605 xpt_unlock_buses(); 3606 if (target_ref) { 3607 if (path->target) 3608 *target_ref = path->target->refcount; 3609 else 3610 *target_ref = 0; 3611 } 3612 if (device_ref) { 3613 if (path->device) 3614 *device_ref = path->device->refcount; 3615 else 3616 *device_ref = 0; 3617 } 3618 } 3619 3620 /* 3621 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3622 * in path1, 2 for match with wildcards in path2. 3623 */ 3624 int 3625 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3626 { 3627 int retval = 0; 3628 3629 if (path1->bus != path2->bus) { 3630 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3631 retval = 1; 3632 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3633 retval = 2; 3634 else 3635 return (-1); 3636 } 3637 if (path1->target != path2->target) { 3638 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3639 if (retval == 0) 3640 retval = 1; 3641 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3642 retval = 2; 3643 else 3644 return (-1); 3645 } 3646 if (path1->device != path2->device) { 3647 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3648 if (retval == 0) 3649 retval = 1; 3650 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3651 retval = 2; 3652 else 3653 return (-1); 3654 } 3655 return (retval); 3656 } 3657 3658 int 3659 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3660 { 3661 int retval = 0; 3662 3663 if (path->bus != dev->target->bus) { 3664 if (path->bus->path_id == CAM_BUS_WILDCARD) 3665 retval = 1; 3666 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3667 retval = 2; 3668 else 3669 return (-1); 3670 } 3671 if (path->target != dev->target) { 3672 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3673 if (retval == 0) 3674 retval = 1; 3675 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3676 retval = 2; 3677 else 3678 return (-1); 3679 } 3680 if (path->device != dev) { 3681 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3682 if (retval == 0) 3683 retval = 1; 3684 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3685 retval = 2; 3686 else 3687 return (-1); 3688 } 3689 return (retval); 3690 } 3691 3692 void 3693 xpt_print_path(struct cam_path *path) 3694 { 3695 3696 if (path == NULL) 3697 printf("(nopath): "); 3698 else { 3699 if (path->periph != NULL) 3700 printf("(%s%d:", path->periph->periph_name, 3701 path->periph->unit_number); 3702 else 3703 printf("(noperiph:"); 3704 3705 if (path->bus != NULL) 3706 printf("%s%d:%d:", path->bus->sim->sim_name, 3707 path->bus->sim->unit_number, 3708 path->bus->sim->bus_id); 3709 else 3710 printf("nobus:"); 3711 3712 if (path->target != NULL) 3713 printf("%d:", path->target->target_id); 3714 else 3715 printf("X:"); 3716 3717 if (path->device != NULL) 3718 printf("%jx): ", (uintmax_t)path->device->lun_id); 3719 else 3720 printf("X): "); 3721 } 3722 } 3723 3724 void 3725 xpt_print_device(struct cam_ed *device) 3726 { 3727 3728 if (device == NULL) 3729 printf("(nopath): "); 3730 else { 3731 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3732 device->sim->unit_number, 3733 device->sim->bus_id, 3734 device->target->target_id, 3735 (uintmax_t)device->lun_id); 3736 } 3737 } 3738 3739 void 3740 xpt_print(struct cam_path *path, const char *fmt, ...) 3741 { 3742 va_list ap; 3743 xpt_print_path(path); 3744 va_start(ap, fmt); 3745 vprintf(fmt, ap); 3746 va_end(ap); 3747 } 3748 3749 int 3750 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3751 { 3752 struct sbuf sb; 3753 3754 sbuf_new(&sb, str, str_len, 0); 3755 3756 if (path == NULL) 3757 sbuf_printf(&sb, "(nopath): "); 3758 else { 3759 if (path->periph != NULL) 3760 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, 3761 path->periph->unit_number); 3762 else 3763 sbuf_printf(&sb, "(noperiph:"); 3764 3765 if (path->bus != NULL) 3766 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, 3767 path->bus->sim->unit_number, 3768 path->bus->sim->bus_id); 3769 else 3770 sbuf_printf(&sb, "nobus:"); 3771 3772 if (path->target != NULL) 3773 sbuf_printf(&sb, "%d:", path->target->target_id); 3774 else 3775 sbuf_printf(&sb, "X:"); 3776 3777 if (path->device != NULL) 3778 sbuf_printf(&sb, "%jx): ", 3779 (uintmax_t)path->device->lun_id); 3780 else 3781 sbuf_printf(&sb, "X): "); 3782 } 3783 sbuf_finish(&sb); 3784 3785 return(sbuf_len(&sb)); 3786 } 3787 3788 path_id_t 3789 xpt_path_path_id(struct cam_path *path) 3790 { 3791 return(path->bus->path_id); 3792 } 3793 3794 target_id_t 3795 xpt_path_target_id(struct cam_path *path) 3796 { 3797 if (path->target != NULL) 3798 return (path->target->target_id); 3799 else 3800 return (CAM_TARGET_WILDCARD); 3801 } 3802 3803 lun_id_t 3804 xpt_path_lun_id(struct cam_path *path) 3805 { 3806 if (path->device != NULL) 3807 return (path->device->lun_id); 3808 else 3809 return (CAM_LUN_WILDCARD); 3810 } 3811 3812 struct cam_sim * 3813 xpt_path_sim(struct cam_path *path) 3814 { 3815 3816 return (path->bus->sim); 3817 } 3818 3819 struct cam_periph* 3820 xpt_path_periph(struct cam_path *path) 3821 { 3822 3823 return (path->periph); 3824 } 3825 3826 /* 3827 * Release a CAM control block for the caller. Remit the cost of the structure 3828 * to the device referenced by the path. If the this device had no 'credits' 3829 * and peripheral drivers have registered async callbacks for this notification 3830 * call them now. 3831 */ 3832 void 3833 xpt_release_ccb(union ccb *free_ccb) 3834 { 3835 struct cam_ed *device; 3836 struct cam_periph *periph; 3837 3838 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3839 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3840 device = free_ccb->ccb_h.path->device; 3841 periph = free_ccb->ccb_h.path->periph; 3842 3843 xpt_free_ccb(free_ccb); 3844 periph->periph_allocated--; 3845 cam_ccbq_release_opening(&device->ccbq); 3846 xpt_run_allocq(periph, 0); 3847 } 3848 3849 /* Functions accessed by SIM drivers */ 3850 3851 static struct xpt_xport_ops xport_default_ops = { 3852 .alloc_device = xpt_alloc_device_default, 3853 .action = xpt_action_default, 3854 .async = xpt_dev_async_default, 3855 }; 3856 static struct xpt_xport xport_default = { 3857 .xport = XPORT_UNKNOWN, 3858 .name = "unknown", 3859 .ops = &xport_default_ops, 3860 }; 3861 3862 CAM_XPT_XPORT(xport_default); 3863 3864 /* 3865 * A sim structure, listing the SIM entry points and instance 3866 * identification info is passed to xpt_bus_register to hook the SIM 3867 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3868 * for this new bus and places it in the array of busses and assigns 3869 * it a path_id. The path_id may be influenced by "hard wiring" 3870 * information specified by the user. Once interrupt services are 3871 * available, the bus will be probed. 3872 */ 3873 int32_t 3874 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3875 { 3876 struct cam_eb *new_bus; 3877 struct cam_eb *old_bus; 3878 struct ccb_pathinq cpi; 3879 struct cam_path *path; 3880 cam_status status; 3881 3882 mtx_assert(sim->mtx, MA_OWNED); 3883 3884 sim->bus_id = bus; 3885 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3886 M_CAMXPT, M_NOWAIT|M_ZERO); 3887 if (new_bus == NULL) { 3888 /* Couldn't satisfy request */ 3889 return (CAM_RESRC_UNAVAIL); 3890 } 3891 3892 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3893 TAILQ_INIT(&new_bus->et_entries); 3894 cam_sim_hold(sim); 3895 new_bus->sim = sim; 3896 timevalclear(&new_bus->last_reset); 3897 new_bus->flags = 0; 3898 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3899 new_bus->generation = 0; 3900 3901 xpt_lock_buses(); 3902 sim->path_id = new_bus->path_id = 3903 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3904 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3905 while (old_bus != NULL 3906 && old_bus->path_id < new_bus->path_id) 3907 old_bus = TAILQ_NEXT(old_bus, links); 3908 if (old_bus != NULL) 3909 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3910 else 3911 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3912 xsoftc.bus_generation++; 3913 xpt_unlock_buses(); 3914 3915 /* 3916 * Set a default transport so that a PATH_INQ can be issued to 3917 * the SIM. This will then allow for probing and attaching of 3918 * a more appropriate transport. 3919 */ 3920 new_bus->xport = &xport_default; 3921 3922 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3923 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3924 if (status != CAM_REQ_CMP) { 3925 xpt_release_bus(new_bus); 3926 free(path, M_CAMXPT); 3927 return (CAM_RESRC_UNAVAIL); 3928 } 3929 3930 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3931 cpi.ccb_h.func_code = XPT_PATH_INQ; 3932 xpt_action((union ccb *)&cpi); 3933 3934 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3935 struct xpt_xport **xpt; 3936 3937 SET_FOREACH(xpt, cam_xpt_xport_set) { 3938 if ((*xpt)->xport == cpi.transport) { 3939 new_bus->xport = *xpt; 3940 break; 3941 } 3942 } 3943 if (new_bus->xport == NULL) { 3944 xpt_print_path(path); 3945 printf("No transport found for %d\n", cpi.transport); 3946 xpt_release_bus(new_bus); 3947 free(path, M_CAMXPT); 3948 return (CAM_RESRC_UNAVAIL); 3949 } 3950 } 3951 3952 /* Notify interested parties */ 3953 if (sim->path_id != CAM_XPT_PATH_ID) { 3954 3955 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3956 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3957 union ccb *scan_ccb; 3958 3959 /* Initiate bus rescan. */ 3960 scan_ccb = xpt_alloc_ccb_nowait(); 3961 if (scan_ccb != NULL) { 3962 scan_ccb->ccb_h.path = path; 3963 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3964 scan_ccb->crcn.flags = 0; 3965 xpt_rescan(scan_ccb); 3966 } else { 3967 xpt_print(path, 3968 "Can't allocate CCB to scan bus\n"); 3969 xpt_free_path(path); 3970 } 3971 } else 3972 xpt_free_path(path); 3973 } else 3974 xpt_free_path(path); 3975 return (CAM_SUCCESS); 3976 } 3977 3978 int32_t 3979 xpt_bus_deregister(path_id_t pathid) 3980 { 3981 struct cam_path bus_path; 3982 cam_status status; 3983 3984 status = xpt_compile_path(&bus_path, NULL, pathid, 3985 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3986 if (status != CAM_REQ_CMP) 3987 return (status); 3988 3989 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3990 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3991 3992 /* Release the reference count held while registered. */ 3993 xpt_release_bus(bus_path.bus); 3994 xpt_release_path(&bus_path); 3995 3996 return (CAM_REQ_CMP); 3997 } 3998 3999 static path_id_t 4000 xptnextfreepathid(void) 4001 { 4002 struct cam_eb *bus; 4003 path_id_t pathid; 4004 const char *strval; 4005 4006 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4007 pathid = 0; 4008 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4009 retry: 4010 /* Find an unoccupied pathid */ 4011 while (bus != NULL && bus->path_id <= pathid) { 4012 if (bus->path_id == pathid) 4013 pathid++; 4014 bus = TAILQ_NEXT(bus, links); 4015 } 4016 4017 /* 4018 * Ensure that this pathid is not reserved for 4019 * a bus that may be registered in the future. 4020 */ 4021 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4022 ++pathid; 4023 /* Start the search over */ 4024 goto retry; 4025 } 4026 return (pathid); 4027 } 4028 4029 static path_id_t 4030 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4031 { 4032 path_id_t pathid; 4033 int i, dunit, val; 4034 char buf[32]; 4035 const char *dname; 4036 4037 pathid = CAM_XPT_PATH_ID; 4038 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4039 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4040 return (pathid); 4041 i = 0; 4042 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4043 if (strcmp(dname, "scbus")) { 4044 /* Avoid a bit of foot shooting. */ 4045 continue; 4046 } 4047 if (dunit < 0) /* unwired?! */ 4048 continue; 4049 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4050 if (sim_bus == val) { 4051 pathid = dunit; 4052 break; 4053 } 4054 } else if (sim_bus == 0) { 4055 /* Unspecified matches bus 0 */ 4056 pathid = dunit; 4057 break; 4058 } else { 4059 printf("Ambiguous scbus configuration for %s%d " 4060 "bus %d, cannot wire down. The kernel " 4061 "config entry for scbus%d should " 4062 "specify a controller bus.\n" 4063 "Scbus will be assigned dynamically.\n", 4064 sim_name, sim_unit, sim_bus, dunit); 4065 break; 4066 } 4067 } 4068 4069 if (pathid == CAM_XPT_PATH_ID) 4070 pathid = xptnextfreepathid(); 4071 return (pathid); 4072 } 4073 4074 static const char * 4075 xpt_async_string(u_int32_t async_code) 4076 { 4077 4078 switch (async_code) { 4079 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4080 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4081 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4082 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4083 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4084 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4085 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4086 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4087 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4088 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4089 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4090 case AC_CONTRACT: return ("AC_CONTRACT"); 4091 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4092 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4093 } 4094 return ("AC_UNKNOWN"); 4095 } 4096 4097 static int 4098 xpt_async_size(u_int32_t async_code) 4099 { 4100 4101 switch (async_code) { 4102 case AC_BUS_RESET: return (0); 4103 case AC_UNSOL_RESEL: return (0); 4104 case AC_SCSI_AEN: return (0); 4105 case AC_SENT_BDR: return (0); 4106 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4107 case AC_PATH_DEREGISTERED: return (0); 4108 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4109 case AC_LOST_DEVICE: return (0); 4110 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4111 case AC_INQ_CHANGED: return (0); 4112 case AC_GETDEV_CHANGED: return (0); 4113 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4114 case AC_ADVINFO_CHANGED: return (-1); 4115 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4116 } 4117 return (0); 4118 } 4119 4120 static int 4121 xpt_async_process_dev(struct cam_ed *device, void *arg) 4122 { 4123 union ccb *ccb = arg; 4124 struct cam_path *path = ccb->ccb_h.path; 4125 void *async_arg = ccb->casync.async_arg_ptr; 4126 u_int32_t async_code = ccb->casync.async_code; 4127 int relock; 4128 4129 if (path->device != device 4130 && path->device->lun_id != CAM_LUN_WILDCARD 4131 && device->lun_id != CAM_LUN_WILDCARD) 4132 return (1); 4133 4134 /* 4135 * The async callback could free the device. 4136 * If it is a broadcast async, it doesn't hold 4137 * device reference, so take our own reference. 4138 */ 4139 xpt_acquire_device(device); 4140 4141 /* 4142 * If async for specific device is to be delivered to 4143 * the wildcard client, take the specific device lock. 4144 * XXX: We may need a way for client to specify it. 4145 */ 4146 if ((device->lun_id == CAM_LUN_WILDCARD && 4147 path->device->lun_id != CAM_LUN_WILDCARD) || 4148 (device->target->target_id == CAM_TARGET_WILDCARD && 4149 path->target->target_id != CAM_TARGET_WILDCARD) || 4150 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4151 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4152 mtx_unlock(&device->device_mtx); 4153 xpt_path_lock(path); 4154 relock = 1; 4155 } else 4156 relock = 0; 4157 4158 (*(device->target->bus->xport->ops->async))(async_code, 4159 device->target->bus, device->target, device, async_arg); 4160 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4161 4162 if (relock) { 4163 xpt_path_unlock(path); 4164 mtx_lock(&device->device_mtx); 4165 } 4166 xpt_release_device(device); 4167 return (1); 4168 } 4169 4170 static int 4171 xpt_async_process_tgt(struct cam_et *target, void *arg) 4172 { 4173 union ccb *ccb = arg; 4174 struct cam_path *path = ccb->ccb_h.path; 4175 4176 if (path->target != target 4177 && path->target->target_id != CAM_TARGET_WILDCARD 4178 && target->target_id != CAM_TARGET_WILDCARD) 4179 return (1); 4180 4181 if (ccb->casync.async_code == AC_SENT_BDR) { 4182 /* Update our notion of when the last reset occurred */ 4183 microtime(&target->last_reset); 4184 } 4185 4186 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4187 } 4188 4189 static void 4190 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4191 { 4192 struct cam_eb *bus; 4193 struct cam_path *path; 4194 void *async_arg; 4195 u_int32_t async_code; 4196 4197 path = ccb->ccb_h.path; 4198 async_code = ccb->casync.async_code; 4199 async_arg = ccb->casync.async_arg_ptr; 4200 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4201 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4202 bus = path->bus; 4203 4204 if (async_code == AC_BUS_RESET) { 4205 /* Update our notion of when the last reset occurred */ 4206 microtime(&bus->last_reset); 4207 } 4208 4209 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4210 4211 /* 4212 * If this wasn't a fully wildcarded async, tell all 4213 * clients that want all async events. 4214 */ 4215 if (bus != xpt_periph->path->bus) { 4216 xpt_path_lock(xpt_periph->path); 4217 xpt_async_process_dev(xpt_periph->path->device, ccb); 4218 xpt_path_unlock(xpt_periph->path); 4219 } 4220 4221 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4222 xpt_release_devq(path, 1, TRUE); 4223 else 4224 xpt_release_simq(path->bus->sim, TRUE); 4225 if (ccb->casync.async_arg_size > 0) 4226 free(async_arg, M_CAMXPT); 4227 xpt_free_path(path); 4228 xpt_free_ccb(ccb); 4229 } 4230 4231 static void 4232 xpt_async_bcast(struct async_list *async_head, 4233 u_int32_t async_code, 4234 struct cam_path *path, void *async_arg) 4235 { 4236 struct async_node *cur_entry; 4237 int lock; 4238 4239 cur_entry = SLIST_FIRST(async_head); 4240 while (cur_entry != NULL) { 4241 struct async_node *next_entry; 4242 /* 4243 * Grab the next list entry before we call the current 4244 * entry's callback. This is because the callback function 4245 * can delete its async callback entry. 4246 */ 4247 next_entry = SLIST_NEXT(cur_entry, links); 4248 if ((cur_entry->event_enable & async_code) != 0) { 4249 lock = cur_entry->event_lock; 4250 if (lock) 4251 CAM_SIM_LOCK(path->device->sim); 4252 cur_entry->callback(cur_entry->callback_arg, 4253 async_code, path, 4254 async_arg); 4255 if (lock) 4256 CAM_SIM_UNLOCK(path->device->sim); 4257 } 4258 cur_entry = next_entry; 4259 } 4260 } 4261 4262 void 4263 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4264 { 4265 union ccb *ccb; 4266 int size; 4267 4268 ccb = xpt_alloc_ccb_nowait(); 4269 if (ccb == NULL) { 4270 xpt_print(path, "Can't allocate CCB to send %s\n", 4271 xpt_async_string(async_code)); 4272 return; 4273 } 4274 4275 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4276 xpt_print(path, "Can't allocate path to send %s\n", 4277 xpt_async_string(async_code)); 4278 xpt_free_ccb(ccb); 4279 return; 4280 } 4281 ccb->ccb_h.path->periph = NULL; 4282 ccb->ccb_h.func_code = XPT_ASYNC; 4283 ccb->ccb_h.cbfcnp = xpt_async_process; 4284 ccb->ccb_h.flags |= CAM_UNLOCKED; 4285 ccb->casync.async_code = async_code; 4286 ccb->casync.async_arg_size = 0; 4287 size = xpt_async_size(async_code); 4288 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4289 ("xpt_async: func %#x %s aync_code %d %s\n", 4290 ccb->ccb_h.func_code, 4291 xpt_action_name(ccb->ccb_h.func_code), 4292 async_code, 4293 xpt_async_string(async_code))); 4294 if (size > 0 && async_arg != NULL) { 4295 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4296 if (ccb->casync.async_arg_ptr == NULL) { 4297 xpt_print(path, "Can't allocate argument to send %s\n", 4298 xpt_async_string(async_code)); 4299 xpt_free_path(ccb->ccb_h.path); 4300 xpt_free_ccb(ccb); 4301 return; 4302 } 4303 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4304 ccb->casync.async_arg_size = size; 4305 } else if (size < 0) { 4306 ccb->casync.async_arg_ptr = async_arg; 4307 ccb->casync.async_arg_size = size; 4308 } 4309 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4310 xpt_freeze_devq(path, 1); 4311 else 4312 xpt_freeze_simq(path->bus->sim, 1); 4313 xpt_done(ccb); 4314 } 4315 4316 static void 4317 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4318 struct cam_et *target, struct cam_ed *device, 4319 void *async_arg) 4320 { 4321 4322 /* 4323 * We only need to handle events for real devices. 4324 */ 4325 if (target->target_id == CAM_TARGET_WILDCARD 4326 || device->lun_id == CAM_LUN_WILDCARD) 4327 return; 4328 4329 printf("%s called\n", __func__); 4330 } 4331 4332 static uint32_t 4333 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4334 { 4335 struct cam_devq *devq; 4336 uint32_t freeze; 4337 4338 devq = dev->sim->devq; 4339 mtx_assert(&devq->send_mtx, MA_OWNED); 4340 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4341 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4342 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4343 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4344 /* Remove frozen device from sendq. */ 4345 if (device_is_queued(dev)) 4346 camq_remove(&devq->send_queue, dev->devq_entry.index); 4347 return (freeze); 4348 } 4349 4350 u_int32_t 4351 xpt_freeze_devq(struct cam_path *path, u_int count) 4352 { 4353 struct cam_ed *dev = path->device; 4354 struct cam_devq *devq; 4355 uint32_t freeze; 4356 4357 devq = dev->sim->devq; 4358 mtx_lock(&devq->send_mtx); 4359 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4360 freeze = xpt_freeze_devq_device(dev, count); 4361 mtx_unlock(&devq->send_mtx); 4362 return (freeze); 4363 } 4364 4365 u_int32_t 4366 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4367 { 4368 struct cam_devq *devq; 4369 uint32_t freeze; 4370 4371 devq = sim->devq; 4372 mtx_lock(&devq->send_mtx); 4373 freeze = (devq->send_queue.qfrozen_cnt += count); 4374 mtx_unlock(&devq->send_mtx); 4375 return (freeze); 4376 } 4377 4378 static void 4379 xpt_release_devq_timeout(void *arg) 4380 { 4381 struct cam_ed *dev; 4382 struct cam_devq *devq; 4383 4384 dev = (struct cam_ed *)arg; 4385 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4386 devq = dev->sim->devq; 4387 mtx_assert(&devq->send_mtx, MA_OWNED); 4388 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4389 xpt_run_devq(devq); 4390 } 4391 4392 void 4393 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4394 { 4395 struct cam_ed *dev; 4396 struct cam_devq *devq; 4397 4398 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4399 count, run_queue)); 4400 dev = path->device; 4401 devq = dev->sim->devq; 4402 mtx_lock(&devq->send_mtx); 4403 if (xpt_release_devq_device(dev, count, run_queue)) 4404 xpt_run_devq(dev->sim->devq); 4405 mtx_unlock(&devq->send_mtx); 4406 } 4407 4408 static int 4409 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4410 { 4411 4412 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4413 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4414 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4415 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4416 if (count > dev->ccbq.queue.qfrozen_cnt) { 4417 #ifdef INVARIANTS 4418 printf("xpt_release_devq(): requested %u > present %u\n", 4419 count, dev->ccbq.queue.qfrozen_cnt); 4420 #endif 4421 count = dev->ccbq.queue.qfrozen_cnt; 4422 } 4423 dev->ccbq.queue.qfrozen_cnt -= count; 4424 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4425 /* 4426 * No longer need to wait for a successful 4427 * command completion. 4428 */ 4429 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4430 /* 4431 * Remove any timeouts that might be scheduled 4432 * to release this queue. 4433 */ 4434 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4435 callout_stop(&dev->callout); 4436 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4437 } 4438 /* 4439 * Now that we are unfrozen schedule the 4440 * device so any pending transactions are 4441 * run. 4442 */ 4443 xpt_schedule_devq(dev->sim->devq, dev); 4444 } else 4445 run_queue = 0; 4446 return (run_queue); 4447 } 4448 4449 void 4450 xpt_release_simq(struct cam_sim *sim, int run_queue) 4451 { 4452 struct cam_devq *devq; 4453 4454 devq = sim->devq; 4455 mtx_lock(&devq->send_mtx); 4456 if (devq->send_queue.qfrozen_cnt <= 0) { 4457 #ifdef INVARIANTS 4458 printf("xpt_release_simq: requested 1 > present %u\n", 4459 devq->send_queue.qfrozen_cnt); 4460 #endif 4461 } else 4462 devq->send_queue.qfrozen_cnt--; 4463 if (devq->send_queue.qfrozen_cnt == 0) { 4464 /* 4465 * If there is a timeout scheduled to release this 4466 * sim queue, remove it. The queue frozen count is 4467 * already at 0. 4468 */ 4469 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4470 callout_stop(&sim->callout); 4471 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4472 } 4473 if (run_queue) { 4474 /* 4475 * Now that we are unfrozen run the send queue. 4476 */ 4477 xpt_run_devq(sim->devq); 4478 } 4479 } 4480 mtx_unlock(&devq->send_mtx); 4481 } 4482 4483 /* 4484 * XXX Appears to be unused. 4485 */ 4486 static void 4487 xpt_release_simq_timeout(void *arg) 4488 { 4489 struct cam_sim *sim; 4490 4491 sim = (struct cam_sim *)arg; 4492 xpt_release_simq(sim, /* run_queue */ TRUE); 4493 } 4494 4495 void 4496 xpt_done(union ccb *done_ccb) 4497 { 4498 struct cam_doneq *queue; 4499 int run, hash; 4500 4501 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4502 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4503 done_ccb->csio.bio != NULL) 4504 biotrack(done_ccb->csio.bio, __func__); 4505 #endif 4506 4507 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4508 ("xpt_done: func= %#x %s status %#x\n", 4509 done_ccb->ccb_h.func_code, 4510 xpt_action_name(done_ccb->ccb_h.func_code), 4511 done_ccb->ccb_h.status)); 4512 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4513 return; 4514 4515 /* Store the time the ccb was in the sim */ 4516 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4517 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4518 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4519 queue = &cam_doneqs[hash]; 4520 mtx_lock(&queue->cam_doneq_mtx); 4521 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4522 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4523 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4524 mtx_unlock(&queue->cam_doneq_mtx); 4525 if (run) 4526 wakeup(&queue->cam_doneq); 4527 } 4528 4529 void 4530 xpt_done_direct(union ccb *done_ccb) 4531 { 4532 4533 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4534 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4535 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4536 return; 4537 4538 /* Store the time the ccb was in the sim */ 4539 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4540 xpt_done_process(&done_ccb->ccb_h); 4541 } 4542 4543 union ccb * 4544 xpt_alloc_ccb() 4545 { 4546 union ccb *new_ccb; 4547 4548 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4549 return (new_ccb); 4550 } 4551 4552 union ccb * 4553 xpt_alloc_ccb_nowait() 4554 { 4555 union ccb *new_ccb; 4556 4557 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4558 return (new_ccb); 4559 } 4560 4561 void 4562 xpt_free_ccb(union ccb *free_ccb) 4563 { 4564 free(free_ccb, M_CAMCCB); 4565 } 4566 4567 4568 4569 /* Private XPT functions */ 4570 4571 /* 4572 * Get a CAM control block for the caller. Charge the structure to the device 4573 * referenced by the path. If we don't have sufficient resources to allocate 4574 * more ccbs, we return NULL. 4575 */ 4576 static union ccb * 4577 xpt_get_ccb_nowait(struct cam_periph *periph) 4578 { 4579 union ccb *new_ccb; 4580 4581 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4582 if (new_ccb == NULL) 4583 return (NULL); 4584 periph->periph_allocated++; 4585 cam_ccbq_take_opening(&periph->path->device->ccbq); 4586 return (new_ccb); 4587 } 4588 4589 static union ccb * 4590 xpt_get_ccb(struct cam_periph *periph) 4591 { 4592 union ccb *new_ccb; 4593 4594 cam_periph_unlock(periph); 4595 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4596 cam_periph_lock(periph); 4597 periph->periph_allocated++; 4598 cam_ccbq_take_opening(&periph->path->device->ccbq); 4599 return (new_ccb); 4600 } 4601 4602 union ccb * 4603 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4604 { 4605 struct ccb_hdr *ccb_h; 4606 4607 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4608 cam_periph_assert(periph, MA_OWNED); 4609 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4610 ccb_h->pinfo.priority != priority) { 4611 if (priority < periph->immediate_priority) { 4612 periph->immediate_priority = priority; 4613 xpt_run_allocq(periph, 0); 4614 } else 4615 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4616 "cgticb", 0); 4617 } 4618 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4619 return ((union ccb *)ccb_h); 4620 } 4621 4622 static void 4623 xpt_acquire_bus(struct cam_eb *bus) 4624 { 4625 4626 xpt_lock_buses(); 4627 bus->refcount++; 4628 xpt_unlock_buses(); 4629 } 4630 4631 static void 4632 xpt_release_bus(struct cam_eb *bus) 4633 { 4634 4635 xpt_lock_buses(); 4636 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4637 if (--bus->refcount > 0) { 4638 xpt_unlock_buses(); 4639 return; 4640 } 4641 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4642 xsoftc.bus_generation++; 4643 xpt_unlock_buses(); 4644 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4645 ("destroying bus, but target list is not empty")); 4646 cam_sim_release(bus->sim); 4647 mtx_destroy(&bus->eb_mtx); 4648 free(bus, M_CAMXPT); 4649 } 4650 4651 static struct cam_et * 4652 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4653 { 4654 struct cam_et *cur_target, *target; 4655 4656 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4657 mtx_assert(&bus->eb_mtx, MA_OWNED); 4658 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4659 M_NOWAIT|M_ZERO); 4660 if (target == NULL) 4661 return (NULL); 4662 4663 TAILQ_INIT(&target->ed_entries); 4664 target->bus = bus; 4665 target->target_id = target_id; 4666 target->refcount = 1; 4667 target->generation = 0; 4668 target->luns = NULL; 4669 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4670 timevalclear(&target->last_reset); 4671 /* 4672 * Hold a reference to our parent bus so it 4673 * will not go away before we do. 4674 */ 4675 bus->refcount++; 4676 4677 /* Insertion sort into our bus's target list */ 4678 cur_target = TAILQ_FIRST(&bus->et_entries); 4679 while (cur_target != NULL && cur_target->target_id < target_id) 4680 cur_target = TAILQ_NEXT(cur_target, links); 4681 if (cur_target != NULL) { 4682 TAILQ_INSERT_BEFORE(cur_target, target, links); 4683 } else { 4684 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4685 } 4686 bus->generation++; 4687 return (target); 4688 } 4689 4690 static void 4691 xpt_acquire_target(struct cam_et *target) 4692 { 4693 struct cam_eb *bus = target->bus; 4694 4695 mtx_lock(&bus->eb_mtx); 4696 target->refcount++; 4697 mtx_unlock(&bus->eb_mtx); 4698 } 4699 4700 static void 4701 xpt_release_target(struct cam_et *target) 4702 { 4703 struct cam_eb *bus = target->bus; 4704 4705 mtx_lock(&bus->eb_mtx); 4706 if (--target->refcount > 0) { 4707 mtx_unlock(&bus->eb_mtx); 4708 return; 4709 } 4710 TAILQ_REMOVE(&bus->et_entries, target, links); 4711 bus->generation++; 4712 mtx_unlock(&bus->eb_mtx); 4713 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4714 ("destroying target, but device list is not empty")); 4715 xpt_release_bus(bus); 4716 mtx_destroy(&target->luns_mtx); 4717 if (target->luns) 4718 free(target->luns, M_CAMXPT); 4719 free(target, M_CAMXPT); 4720 } 4721 4722 static struct cam_ed * 4723 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4724 lun_id_t lun_id) 4725 { 4726 struct cam_ed *device; 4727 4728 device = xpt_alloc_device(bus, target, lun_id); 4729 if (device == NULL) 4730 return (NULL); 4731 4732 device->mintags = 1; 4733 device->maxtags = 1; 4734 return (device); 4735 } 4736 4737 static void 4738 xpt_destroy_device(void *context, int pending) 4739 { 4740 struct cam_ed *device = context; 4741 4742 mtx_lock(&device->device_mtx); 4743 mtx_destroy(&device->device_mtx); 4744 free(device, M_CAMDEV); 4745 } 4746 4747 struct cam_ed * 4748 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4749 { 4750 struct cam_ed *cur_device, *device; 4751 struct cam_devq *devq; 4752 cam_status status; 4753 4754 mtx_assert(&bus->eb_mtx, MA_OWNED); 4755 /* Make space for us in the device queue on our bus */ 4756 devq = bus->sim->devq; 4757 mtx_lock(&devq->send_mtx); 4758 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4759 mtx_unlock(&devq->send_mtx); 4760 if (status != CAM_REQ_CMP) 4761 return (NULL); 4762 4763 device = (struct cam_ed *)malloc(sizeof(*device), 4764 M_CAMDEV, M_NOWAIT|M_ZERO); 4765 if (device == NULL) 4766 return (NULL); 4767 4768 cam_init_pinfo(&device->devq_entry); 4769 device->target = target; 4770 device->lun_id = lun_id; 4771 device->sim = bus->sim; 4772 if (cam_ccbq_init(&device->ccbq, 4773 bus->sim->max_dev_openings) != 0) { 4774 free(device, M_CAMDEV); 4775 return (NULL); 4776 } 4777 SLIST_INIT(&device->asyncs); 4778 SLIST_INIT(&device->periphs); 4779 device->generation = 0; 4780 device->flags = CAM_DEV_UNCONFIGURED; 4781 device->tag_delay_count = 0; 4782 device->tag_saved_openings = 0; 4783 device->refcount = 1; 4784 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4785 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4786 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4787 /* 4788 * Hold a reference to our parent bus so it 4789 * will not go away before we do. 4790 */ 4791 target->refcount++; 4792 4793 cur_device = TAILQ_FIRST(&target->ed_entries); 4794 while (cur_device != NULL && cur_device->lun_id < lun_id) 4795 cur_device = TAILQ_NEXT(cur_device, links); 4796 if (cur_device != NULL) 4797 TAILQ_INSERT_BEFORE(cur_device, device, links); 4798 else 4799 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4800 target->generation++; 4801 return (device); 4802 } 4803 4804 void 4805 xpt_acquire_device(struct cam_ed *device) 4806 { 4807 struct cam_eb *bus = device->target->bus; 4808 4809 mtx_lock(&bus->eb_mtx); 4810 device->refcount++; 4811 mtx_unlock(&bus->eb_mtx); 4812 } 4813 4814 void 4815 xpt_release_device(struct cam_ed *device) 4816 { 4817 struct cam_eb *bus = device->target->bus; 4818 struct cam_devq *devq; 4819 4820 mtx_lock(&bus->eb_mtx); 4821 if (--device->refcount > 0) { 4822 mtx_unlock(&bus->eb_mtx); 4823 return; 4824 } 4825 4826 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4827 device->target->generation++; 4828 mtx_unlock(&bus->eb_mtx); 4829 4830 /* Release our slot in the devq */ 4831 devq = bus->sim->devq; 4832 mtx_lock(&devq->send_mtx); 4833 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4834 mtx_unlock(&devq->send_mtx); 4835 4836 KASSERT(SLIST_EMPTY(&device->periphs), 4837 ("destroying device, but periphs list is not empty")); 4838 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4839 ("destroying device while still queued for ccbs")); 4840 4841 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4842 callout_stop(&device->callout); 4843 4844 xpt_release_target(device->target); 4845 4846 cam_ccbq_fini(&device->ccbq); 4847 /* 4848 * Free allocated memory. free(9) does nothing if the 4849 * supplied pointer is NULL, so it is safe to call without 4850 * checking. 4851 */ 4852 free(device->supported_vpds, M_CAMXPT); 4853 free(device->device_id, M_CAMXPT); 4854 free(device->ext_inq, M_CAMXPT); 4855 free(device->physpath, M_CAMXPT); 4856 free(device->rcap_buf, M_CAMXPT); 4857 free(device->serial_num, M_CAMXPT); 4858 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4859 } 4860 4861 u_int32_t 4862 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4863 { 4864 int result; 4865 struct cam_ed *dev; 4866 4867 dev = path->device; 4868 mtx_lock(&dev->sim->devq->send_mtx); 4869 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4870 mtx_unlock(&dev->sim->devq->send_mtx); 4871 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4872 || (dev->inq_flags & SID_CmdQue) != 0) 4873 dev->tag_saved_openings = newopenings; 4874 return (result); 4875 } 4876 4877 static struct cam_eb * 4878 xpt_find_bus(path_id_t path_id) 4879 { 4880 struct cam_eb *bus; 4881 4882 xpt_lock_buses(); 4883 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4884 bus != NULL; 4885 bus = TAILQ_NEXT(bus, links)) { 4886 if (bus->path_id == path_id) { 4887 bus->refcount++; 4888 break; 4889 } 4890 } 4891 xpt_unlock_buses(); 4892 return (bus); 4893 } 4894 4895 static struct cam_et * 4896 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4897 { 4898 struct cam_et *target; 4899 4900 mtx_assert(&bus->eb_mtx, MA_OWNED); 4901 for (target = TAILQ_FIRST(&bus->et_entries); 4902 target != NULL; 4903 target = TAILQ_NEXT(target, links)) { 4904 if (target->target_id == target_id) { 4905 target->refcount++; 4906 break; 4907 } 4908 } 4909 return (target); 4910 } 4911 4912 static struct cam_ed * 4913 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4914 { 4915 struct cam_ed *device; 4916 4917 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4918 for (device = TAILQ_FIRST(&target->ed_entries); 4919 device != NULL; 4920 device = TAILQ_NEXT(device, links)) { 4921 if (device->lun_id == lun_id) { 4922 device->refcount++; 4923 break; 4924 } 4925 } 4926 return (device); 4927 } 4928 4929 void 4930 xpt_start_tags(struct cam_path *path) 4931 { 4932 struct ccb_relsim crs; 4933 struct cam_ed *device; 4934 struct cam_sim *sim; 4935 int newopenings; 4936 4937 device = path->device; 4938 sim = path->bus->sim; 4939 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4940 xpt_freeze_devq(path, /*count*/1); 4941 device->inq_flags |= SID_CmdQue; 4942 if (device->tag_saved_openings != 0) 4943 newopenings = device->tag_saved_openings; 4944 else 4945 newopenings = min(device->maxtags, 4946 sim->max_tagged_dev_openings); 4947 xpt_dev_ccbq_resize(path, newopenings); 4948 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4949 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4950 crs.ccb_h.func_code = XPT_REL_SIMQ; 4951 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4952 crs.openings 4953 = crs.release_timeout 4954 = crs.qfrozen_cnt 4955 = 0; 4956 xpt_action((union ccb *)&crs); 4957 } 4958 4959 void 4960 xpt_stop_tags(struct cam_path *path) 4961 { 4962 struct ccb_relsim crs; 4963 struct cam_ed *device; 4964 struct cam_sim *sim; 4965 4966 device = path->device; 4967 sim = path->bus->sim; 4968 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4969 device->tag_delay_count = 0; 4970 xpt_freeze_devq(path, /*count*/1); 4971 device->inq_flags &= ~SID_CmdQue; 4972 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4973 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4974 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4975 crs.ccb_h.func_code = XPT_REL_SIMQ; 4976 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4977 crs.openings 4978 = crs.release_timeout 4979 = crs.qfrozen_cnt 4980 = 0; 4981 xpt_action((union ccb *)&crs); 4982 } 4983 4984 static void 4985 xpt_boot_delay(void *arg) 4986 { 4987 4988 xpt_release_boot(); 4989 } 4990 4991 static void 4992 xpt_config(void *arg) 4993 { 4994 /* 4995 * Now that interrupts are enabled, go find our devices 4996 */ 4997 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 4998 printf("xpt_config: failed to create taskqueue thread.\n"); 4999 5000 /* Setup debugging path */ 5001 if (cam_dflags != CAM_DEBUG_NONE) { 5002 if (xpt_create_path(&cam_dpath, NULL, 5003 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5004 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5005 printf("xpt_config: xpt_create_path() failed for debug" 5006 " target %d:%d:%d, debugging disabled\n", 5007 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5008 cam_dflags = CAM_DEBUG_NONE; 5009 } 5010 } else 5011 cam_dpath = NULL; 5012 5013 periphdriver_init(1); 5014 xpt_hold_boot(); 5015 callout_init(&xsoftc.boot_callout, 1); 5016 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 5017 xpt_boot_delay, NULL, 0); 5018 /* Fire up rescan thread. */ 5019 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5020 "cam", "scanner")) { 5021 printf("xpt_config: failed to create rescan thread.\n"); 5022 } 5023 } 5024 5025 void 5026 xpt_hold_boot(void) 5027 { 5028 xpt_lock_buses(); 5029 xsoftc.buses_to_config++; 5030 xpt_unlock_buses(); 5031 } 5032 5033 void 5034 xpt_release_boot(void) 5035 { 5036 xpt_lock_buses(); 5037 xsoftc.buses_to_config--; 5038 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 5039 struct xpt_task *task; 5040 5041 xsoftc.buses_config_done = 1; 5042 xpt_unlock_buses(); 5043 /* Call manually because we don't have any busses */ 5044 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5045 if (task != NULL) { 5046 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5047 taskqueue_enqueue(taskqueue_thread, &task->task); 5048 } 5049 } else 5050 xpt_unlock_buses(); 5051 } 5052 5053 /* 5054 * If the given device only has one peripheral attached to it, and if that 5055 * peripheral is the passthrough driver, announce it. This insures that the 5056 * user sees some sort of announcement for every peripheral in their system. 5057 */ 5058 static int 5059 xptpassannouncefunc(struct cam_ed *device, void *arg) 5060 { 5061 struct cam_periph *periph; 5062 int i; 5063 5064 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5065 periph = SLIST_NEXT(periph, periph_links), i++); 5066 5067 periph = SLIST_FIRST(&device->periphs); 5068 if ((i == 1) 5069 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5070 xpt_announce_periph(periph, NULL); 5071 5072 return(1); 5073 } 5074 5075 static void 5076 xpt_finishconfig_task(void *context, int pending) 5077 { 5078 5079 periphdriver_init(2); 5080 /* 5081 * Check for devices with no "standard" peripheral driver 5082 * attached. For any devices like that, announce the 5083 * passthrough driver so the user will see something. 5084 */ 5085 if (!bootverbose) 5086 xpt_for_all_devices(xptpassannouncefunc, NULL); 5087 5088 /* Release our hook so that the boot can continue. */ 5089 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5090 free(xsoftc.xpt_config_hook, M_CAMXPT); 5091 xsoftc.xpt_config_hook = NULL; 5092 5093 free(context, M_CAMXPT); 5094 } 5095 5096 cam_status 5097 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5098 struct cam_path *path) 5099 { 5100 struct ccb_setasync csa; 5101 cam_status status; 5102 int xptpath = 0; 5103 5104 if (path == NULL) { 5105 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5106 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5107 if (status != CAM_REQ_CMP) 5108 return (status); 5109 xpt_path_lock(path); 5110 xptpath = 1; 5111 } 5112 5113 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5114 csa.ccb_h.func_code = XPT_SASYNC_CB; 5115 csa.event_enable = event; 5116 csa.callback = cbfunc; 5117 csa.callback_arg = cbarg; 5118 xpt_action((union ccb *)&csa); 5119 status = csa.ccb_h.status; 5120 5121 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5122 ("xpt_register_async: func %p\n", cbfunc)); 5123 5124 if (xptpath) { 5125 xpt_path_unlock(path); 5126 xpt_free_path(path); 5127 } 5128 5129 if ((status == CAM_REQ_CMP) && 5130 (csa.event_enable & AC_FOUND_DEVICE)) { 5131 /* 5132 * Get this peripheral up to date with all 5133 * the currently existing devices. 5134 */ 5135 xpt_for_all_devices(xptsetasyncfunc, &csa); 5136 } 5137 if ((status == CAM_REQ_CMP) && 5138 (csa.event_enable & AC_PATH_REGISTERED)) { 5139 /* 5140 * Get this peripheral up to date with all 5141 * the currently existing busses. 5142 */ 5143 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5144 } 5145 5146 return (status); 5147 } 5148 5149 static void 5150 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5151 { 5152 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5153 5154 switch (work_ccb->ccb_h.func_code) { 5155 /* Common cases first */ 5156 case XPT_PATH_INQ: /* Path routing inquiry */ 5157 { 5158 struct ccb_pathinq *cpi; 5159 5160 cpi = &work_ccb->cpi; 5161 cpi->version_num = 1; /* XXX??? */ 5162 cpi->hba_inquiry = 0; 5163 cpi->target_sprt = 0; 5164 cpi->hba_misc = 0; 5165 cpi->hba_eng_cnt = 0; 5166 cpi->max_target = 0; 5167 cpi->max_lun = 0; 5168 cpi->initiator_id = 0; 5169 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5170 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5171 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5172 cpi->unit_number = sim->unit_number; 5173 cpi->bus_id = sim->bus_id; 5174 cpi->base_transfer_speed = 0; 5175 cpi->protocol = PROTO_UNSPECIFIED; 5176 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5177 cpi->transport = XPORT_UNSPECIFIED; 5178 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5179 cpi->ccb_h.status = CAM_REQ_CMP; 5180 xpt_done(work_ccb); 5181 break; 5182 } 5183 default: 5184 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5185 xpt_done(work_ccb); 5186 break; 5187 } 5188 } 5189 5190 /* 5191 * The xpt as a "controller" has no interrupt sources, so polling 5192 * is a no-op. 5193 */ 5194 static void 5195 xptpoll(struct cam_sim *sim) 5196 { 5197 } 5198 5199 void 5200 xpt_lock_buses(void) 5201 { 5202 mtx_lock(&xsoftc.xpt_topo_lock); 5203 } 5204 5205 void 5206 xpt_unlock_buses(void) 5207 { 5208 mtx_unlock(&xsoftc.xpt_topo_lock); 5209 } 5210 5211 struct mtx * 5212 xpt_path_mtx(struct cam_path *path) 5213 { 5214 5215 return (&path->device->device_mtx); 5216 } 5217 5218 static void 5219 xpt_done_process(struct ccb_hdr *ccb_h) 5220 { 5221 struct cam_sim *sim; 5222 struct cam_devq *devq; 5223 struct mtx *mtx = NULL; 5224 5225 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5226 struct ccb_scsiio *csio; 5227 5228 if (ccb_h->func_code == XPT_SCSI_IO) { 5229 csio = &((union ccb *)ccb_h)->csio; 5230 if (csio->bio != NULL) 5231 biotrack(csio->bio, __func__); 5232 } 5233 #endif 5234 5235 if (ccb_h->flags & CAM_HIGH_POWER) { 5236 struct highpowerlist *hphead; 5237 struct cam_ed *device; 5238 5239 mtx_lock(&xsoftc.xpt_highpower_lock); 5240 hphead = &xsoftc.highpowerq; 5241 5242 device = STAILQ_FIRST(hphead); 5243 5244 /* 5245 * Increment the count since this command is done. 5246 */ 5247 xsoftc.num_highpower++; 5248 5249 /* 5250 * Any high powered commands queued up? 5251 */ 5252 if (device != NULL) { 5253 5254 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5255 mtx_unlock(&xsoftc.xpt_highpower_lock); 5256 5257 mtx_lock(&device->sim->devq->send_mtx); 5258 xpt_release_devq_device(device, 5259 /*count*/1, /*runqueue*/TRUE); 5260 mtx_unlock(&device->sim->devq->send_mtx); 5261 } else 5262 mtx_unlock(&xsoftc.xpt_highpower_lock); 5263 } 5264 5265 sim = ccb_h->path->bus->sim; 5266 5267 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5268 xpt_release_simq(sim, /*run_queue*/FALSE); 5269 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5270 } 5271 5272 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5273 && (ccb_h->status & CAM_DEV_QFRZN)) { 5274 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5275 ccb_h->status &= ~CAM_DEV_QFRZN; 5276 } 5277 5278 devq = sim->devq; 5279 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5280 struct cam_ed *dev = ccb_h->path->device; 5281 5282 mtx_lock(&devq->send_mtx); 5283 devq->send_active--; 5284 devq->send_openings++; 5285 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5286 5287 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5288 && (dev->ccbq.dev_active == 0))) { 5289 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5290 xpt_release_devq_device(dev, /*count*/1, 5291 /*run_queue*/FALSE); 5292 } 5293 5294 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5295 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5296 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5297 xpt_release_devq_device(dev, /*count*/1, 5298 /*run_queue*/FALSE); 5299 } 5300 5301 if (!device_is_queued(dev)) 5302 (void)xpt_schedule_devq(devq, dev); 5303 xpt_run_devq(devq); 5304 mtx_unlock(&devq->send_mtx); 5305 5306 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5307 mtx = xpt_path_mtx(ccb_h->path); 5308 mtx_lock(mtx); 5309 5310 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5311 && (--dev->tag_delay_count == 0)) 5312 xpt_start_tags(ccb_h->path); 5313 } 5314 } 5315 5316 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5317 if (mtx == NULL) { 5318 mtx = xpt_path_mtx(ccb_h->path); 5319 mtx_lock(mtx); 5320 } 5321 } else { 5322 if (mtx != NULL) { 5323 mtx_unlock(mtx); 5324 mtx = NULL; 5325 } 5326 } 5327 5328 /* Call the peripheral driver's callback */ 5329 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5330 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5331 if (mtx != NULL) 5332 mtx_unlock(mtx); 5333 } 5334 5335 void 5336 xpt_done_td(void *arg) 5337 { 5338 struct cam_doneq *queue = arg; 5339 struct ccb_hdr *ccb_h; 5340 STAILQ_HEAD(, ccb_hdr) doneq; 5341 5342 STAILQ_INIT(&doneq); 5343 mtx_lock(&queue->cam_doneq_mtx); 5344 while (1) { 5345 while (STAILQ_EMPTY(&queue->cam_doneq)) { 5346 queue->cam_doneq_sleep = 1; 5347 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5348 PRIBIO, "-", 0); 5349 queue->cam_doneq_sleep = 0; 5350 } 5351 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5352 mtx_unlock(&queue->cam_doneq_mtx); 5353 5354 THREAD_NO_SLEEPING(); 5355 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5356 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5357 xpt_done_process(ccb_h); 5358 } 5359 THREAD_SLEEPING_OK(); 5360 5361 mtx_lock(&queue->cam_doneq_mtx); 5362 } 5363 } 5364 5365 static void 5366 camisr_runqueue(void) 5367 { 5368 struct ccb_hdr *ccb_h; 5369 struct cam_doneq *queue; 5370 int i; 5371 5372 /* Process global queues. */ 5373 for (i = 0; i < cam_num_doneqs; i++) { 5374 queue = &cam_doneqs[i]; 5375 mtx_lock(&queue->cam_doneq_mtx); 5376 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5377 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5378 mtx_unlock(&queue->cam_doneq_mtx); 5379 xpt_done_process(ccb_h); 5380 mtx_lock(&queue->cam_doneq_mtx); 5381 } 5382 mtx_unlock(&queue->cam_doneq_mtx); 5383 } 5384 } 5385 5386 struct kv 5387 { 5388 uint32_t v; 5389 const char *name; 5390 }; 5391 5392 static struct kv map[] = { 5393 { XPT_NOOP, "XPT_NOOP" }, 5394 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5395 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5396 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5397 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5398 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5399 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5400 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5401 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5402 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5403 { XPT_DEBUG, "XPT_DEBUG" }, 5404 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5405 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5406 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5407 { XPT_ASYNC, "XPT_ASYNC" }, 5408 { XPT_ABORT, "XPT_ABORT" }, 5409 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5410 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5411 { XPT_TERM_IO, "XPT_TERM_IO" }, 5412 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5413 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5414 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5415 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5416 { XPT_ATA_IO, "XPT_ATA_IO" }, 5417 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5418 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5419 { XPT_NVME_IO, "XPT_NVME_IO" }, 5420 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5421 { XPT_SMP_IO, "XPT_SMP_IO" }, 5422 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5423 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5424 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5425 { XPT_EN_LUN, "XPT_EN_LUN" }, 5426 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5427 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5428 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5429 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5430 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5431 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5432 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5433 { 0, 0 } 5434 }; 5435 5436 static const char * 5437 xpt_action_name(uint32_t action) 5438 { 5439 static char buffer[32]; /* Only for unknown messages -- racy */ 5440 struct kv *walker = map; 5441 5442 while (walker->name != NULL) { 5443 if (walker->v == action) 5444 return (walker->name); 5445 walker++; 5446 } 5447 5448 snprintf(buffer, sizeof(buffer), "%#x", action); 5449 return (buffer); 5450 } 5451