/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/* Wild guess based on not wanting to grow the stack too much */
#define	XPT_PRINT_MAXLEN	512
#ifdef PRINTF_BUFR_SIZE
#define	XPT_PRINT_LEN	PRINTF_BUFR_SIZE
#else
#define	XPT_PRINT_LEN	128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define	CAM_MAX_HIGHPOWER  4
#endif

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/*
	 * Registered buses
	 *
	 * N.B., "busses" is an archaic spelling of "buses".  In new code
	 * "buses" is preferred.
	 */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	int			boot_delay;
	struct callout		boot_callout;
	struct task		boot_task;
	struct root_hold_token	xpt_rootmount;

	struct mtx		xpt_topo_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
    &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;
static struct cam_doneq cam_async;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
    &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
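/*
 * Illustrative note (not part of the original sources): cam_dflags below is
 * exposed as the kern.cam.dflags sysctl/tunable, while cam_dpath is normally
 * established through an XPT_DEBUG CCB (for example as issued by
 * "camcontrol debug").  Code that wants trace output typically uses the
 * CAM_DEBUG() macro from cam_debug.h, roughly:
 *
 *	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("probe state %d\n", state));
 *
 * which only prints when the given path matches cam_dpath and the requested
 * flag is enabled in cam_dflags.
 */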
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
    &cam_dflags, 0, "Enabled debug flags");
uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
    &cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void		xpt_async_bcast(struct async_list *async_head,
					uint32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static void	 xpt_hold_boot_locked(void);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  uint32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static void	 xpt_async_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(uint32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (inccb->ccb_h.func_code == XPT_SCSI_IO)
			inccb->csio.bio = NULL;
#endif

		if (inccb->ccb_h.flags & CAM_UNLOCKED)
			return (EINVAL);

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */
			memset(&ccb, 0, sizeof(ccb));

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be) impossible
	 * for the device peripheral driver list to change, since we look at
	 * the whole thing in one pass, and we do it with lock protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		bool base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = false;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.bio = NULL;
#endif

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {
			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = true;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strlcpy(ccb->cgdl.periph_name,
					    periph->periph_name,
					    sizeof(ccb->cgdl.periph_name));
					ccb->cgdl.unit_number =
					    periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
						    CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						    CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
					    device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
					    periph->path->bus->path_id;
					ccb->ccb_h.target_id =
					    periph->path->target->target_id;
					ccb->ccb_h.target_lun =
					    periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
	struct xpt_proto **pp;

	SET_FOREACH(pp, cam_xpt_proto_set) {
		if ((*pp)->proto == proto)
			return *pp;
	}

	return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct mtx	*mtx;
	struct cam_ed	*device;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * We need to lock the device's mutex which we use as
			 * the path mutex. We can't do it directly because the
			 * cam_path in the ccb may wind up going away because
			 * the path lock may be dropped and the path retired in
			 * the completion callback. We do this directly to keep
			 * the reference counts in cam_path sane. We also have
			 * to copy the device pointer because ccb_h.path may
			 * be freed in the callback.
			 */
			mtx = xpt_path_mtx(ccb->ccb_h.path);
			device = ccb->ccb_h.path->device;
			xpt_acquire_device(device);
			mtx_lock(mtx);
			xpt_action(ccb);
			mtx_unlock(mtx);
			xpt_release_device(device);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
	    xpt_action_name(ccb->ccb_h.func_code)));

	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xpt_hold_boot_locked();
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe buses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/NULL,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		printf("xpt_init: xpt_bus_register failed with errno %d,"
		       " failing attach\n", error);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}

	mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
	STAILQ_INIT(&cam_async.cam_doneq);
	if (kproc_kthread_add(xpt_async_td, &cam_async,
	    &cam_proc, NULL, 0, 0, "cam", "async") != 0) {
		printf("xpt_init: Cannot init async thread "
		       "- failing attach\n");
		return (ENOMEM);
	}

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	config_intrhook_oneshot(xpt_config, NULL);

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	char buf[128];
	struct sbuf sb;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
	xpt_announce_periph_sbuf(periph, &sb, announce_string);
	(void)sbuf_finish(&sb);
}

void
xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
    char *announce_string)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;
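
	/*
	 * Added descriptive note: the first sbuf line below is the familiar
	 * probe announcement, e.g. "da0 at ahc0 bus 0 scbus0 target 1 lun 0"
	 * (illustrative example only).
	 */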
	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	    periph->periph_name, periph->unit_number,
	    path->bus->sim->sim_name,
	    path->bus->sim->unit_number,
	    path->bus->sim->bus_id,
	    path->bus->path_id,
	    path->target->target_id,
	    (uintmax_t)path->device->lun_id);
	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->announce_sbuf(path->device, sb);
	else
		sbuf_printf(sb, "Unknown protocol device %d\n",
		    path->device->protocol);
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen  - print only the first 60 chars */
		sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
		    periph->periph_name, periph->unit_number,
		    path->device->serial_num);
	}
	/* Announce transport details. */
	path->bus->xport->ops->announce_sbuf(periph, sb);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
		    periph->periph_name, periph->unit_number);
	}
	/* Announce caller's details if they've passed in. */
	if (announce_string != NULL)
		sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
		    periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
    int quirks, char *bit_string)
{
	if (quirks != 0) {
		sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	char buf[128];
	struct sbuf sb;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
	xpt_denounce_periph_sbuf(periph, &sb);
	(void)sbuf_finish(&sb);
}

void
xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);

	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	    periph->periph_name, periph->unit_number,
	    path->bus->sim->sim_name,
	    path->bus->sim->unit_number,
	    path->bus->sim->bus_id,
	    path->bus->path_id,
	    path->target->target_id,
	    (uintmax_t)path->device->lun_id);
	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->denounce_sbuf(path->device, sb);
	else
		sbuf_printf(sb, "Unknown protocol device %d",
		    path->device->protocol);
	if (path->device->serial_num_len > 0)
		sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
	sbuf_printf(sb, " detached\n");
}

int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l, o;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
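	/*
	 * Added descriptive note: the code below builds an XPT_DEV_ADVINFO
	 * request for the named attribute.  For the GEOM::lunid/lunname
	 * attributes a temporary buffer is allocated so the raw VPD
	 * device-identification page can be decoded before the result is
	 * formatted into the caller's buffer.
	 */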
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.bufsiz = len;
	cdai.buf = buf;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
	    strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
		cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
		if (cdai.buf == NULL) {
			ret = ENOMEM;
			goto out;
		}
	} else
		goto out;

	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	switch(cdai.buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		did = (struct scsi_vpd_device_id *)cdai.buf;
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_eui64);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_uuid);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_md5);
		} else
			idd = NULL;

		if (idd == NULL)
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_name);
		if (idd == NULL)
			break;

		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
		    SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
			break;
		}
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
		    SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
			break;
		}
		if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
		    SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
			if ((idd->length - 2) * 2 + 4 >= len) {
				ret = EFAULT;
				break;
			}
			for (l = 2, o = 0; l < idd->length; l++) {
				if (l == 6 || l == 8 || l == 10 || l == 12)
					o += sprintf(buf + o, "-");
				o += sprintf(buf + o, "%02x",
				    idd->identifier[l]);
			}
			break;
		}
		if (idd->length * 2 < len) {
			for (l = 0; l < idd->length; l++)
				sprintf(buf + l * 2, "%02x",
				    idd->identifier[l]);
		} else
			ret = EFAULT;
		break;
	default:
		if (cdai.provsiz < len) {
			cdai.buf[cdai.provsiz] = 0;
			ret = 0;
		} else
			ret = EFAULT;
		break;
	}

out:
	if ((char *)cdai.buf != buf)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;
		struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type == DEV_MATCH_DEVICE &&
		    (dp->flags & DEV_MATCH_PATH) != 0 &&
		    dp->path_id != bus->path_id)
			continue;
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_PATH) != 0 &&
		    pp->path_id != bus->path_id)
			continue;
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;
		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_TARGET) != 0 &&
		    pp->target_id != device->target->target_id)
			continue;
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_LUN) != 0 &&
		    pp->target_lun != device->lun_id)
			continue;
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_et *target;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strlcpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name,
			sizeof(cdm->matches[j].result.bus_result.dev_name));
	}

	/*
	 * If the user is only interested in buses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL)) {
		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
		    bus->generation)) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return (0);
		}
		target = (struct cam_et *)cdm->pos.cookie.target;
		target->refcount++;
	} else
		target = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_eb *bus;
	struct cam_ed *device;

	cdm = (struct ccb_dev_match *)arg;
	bus = target->bus;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL)) {
		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
		    target->generation) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		device = (struct cam_ed *)cdm->pos.cookie.device;
		device->refcount++;
	} else
		device = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct cam_eb *bus;
	struct cam_periph *periph;
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;
	bus = device->target->bus;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	xpt_lock_buses();
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL)) {
		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
		    device->generation) {
			mtx_unlock(&bus->eb_mtx);
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		periph = (struct cam_periph *)cdm->pos.cookie.periph;
		periph->refcount++;
	} else
		periph = NULL;
	mtx_unlock(&bus->eb_mtx);
	xpt_unlock_buses();

	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
1863 */ 1864 if (retval & DM_RET_COPY) { 1865 int spaceleft, j; 1866 size_t l; 1867 1868 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1869 sizeof(struct dev_match_result)); 1870 1871 /* 1872 * If we don't have enough space to put in another 1873 * match result, save our position and tell the 1874 * user there are more devices to check. 1875 */ 1876 if (spaceleft < sizeof(struct dev_match_result)) { 1877 bzero(&cdm->pos, sizeof(cdm->pos)); 1878 cdm->pos.position_type = 1879 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1880 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1881 CAM_DEV_POS_PERIPH; 1882 1883 cdm->pos.cookie.bus = periph->path->bus; 1884 cdm->pos.generations[CAM_BUS_GENERATION]= 1885 xsoftc.bus_generation; 1886 cdm->pos.cookie.target = periph->path->target; 1887 cdm->pos.generations[CAM_TARGET_GENERATION] = 1888 periph->path->bus->generation; 1889 cdm->pos.cookie.device = periph->path->device; 1890 cdm->pos.generations[CAM_DEV_GENERATION] = 1891 periph->path->target->generation; 1892 cdm->pos.cookie.periph = periph; 1893 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1894 periph->path->device->generation; 1895 cdm->status = CAM_DEV_MATCH_MORE; 1896 return(0); 1897 } 1898 1899 j = cdm->num_matches; 1900 cdm->num_matches++; 1901 cdm->matches[j].type = DEV_MATCH_PERIPH; 1902 cdm->matches[j].result.periph_result.path_id = 1903 periph->path->bus->path_id; 1904 cdm->matches[j].result.periph_result.target_id = 1905 periph->path->target->target_id; 1906 cdm->matches[j].result.periph_result.target_lun = 1907 periph->path->device->lun_id; 1908 cdm->matches[j].result.periph_result.unit_number = 1909 periph->unit_number; 1910 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 1911 strlcpy(cdm->matches[j].result.periph_result.periph_name, 1912 periph->periph_name, l); 1913 } 1914 1915 return(1); 1916 } 1917 1918 static int 1919 xptedtmatch(struct ccb_dev_match *cdm) 1920 { 1921 struct cam_eb *bus; 1922 int ret; 1923 1924 cdm->num_matches = 0; 1925 1926 /* 1927 * Check the bus list generation. If it has changed, the user 1928 * needs to reset everything and start over. 1929 */ 1930 xpt_lock_buses(); 1931 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1932 && (cdm->pos.cookie.bus != NULL)) { 1933 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1934 xsoftc.bus_generation) { 1935 xpt_unlock_buses(); 1936 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1937 return(0); 1938 } 1939 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1940 bus->refcount++; 1941 } else 1942 bus = NULL; 1943 xpt_unlock_buses(); 1944 1945 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1946 1947 /* 1948 * If we get back 0, that means that we had to stop before fully 1949 * traversing the EDT. It also means that one of the subroutines 1950 * has set the status field to the proper value. If we get back 1, 1951 * we've fully traversed the EDT and copied out any matching entries. 
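 *
 * From the submitter's point of view the interesting outcomes are in
 * cdm->status rather than in this return value.  A sketch of the usual
 * caller-side handling (names as in cam_ccb.h; illustration only):
 *
 *   switch (cdm->status) {
 *   case CAM_DEV_MATCH_LAST:          // traversal finished
 *           break;
 *   case CAM_DEV_MATCH_MORE:          // resubmit, cdm->pos untouched
 *           resubmit = 1;
 *           break;
 *   case CAM_DEV_MATCH_LIST_CHANGED:  // EDT changed under us; start over
 *           bzero(&cdm->pos, sizeof(cdm->pos));
 *           resubmit = 1;
 *           break;
 *   default:                          // CAM_DEV_MATCH_ERROR
 *           error = EINVAL;
 *           break;
 *   }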
1952 */ 1953 if (ret == 1) 1954 cdm->status = CAM_DEV_MATCH_LAST; 1955 1956 return(ret); 1957 } 1958 1959 static int 1960 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1961 { 1962 struct cam_periph *periph; 1963 struct ccb_dev_match *cdm; 1964 1965 cdm = (struct ccb_dev_match *)arg; 1966 1967 xpt_lock_buses(); 1968 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1969 && (cdm->pos.cookie.pdrv == pdrv) 1970 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1971 && (cdm->pos.cookie.periph != NULL)) { 1972 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1973 (*pdrv)->generation) { 1974 xpt_unlock_buses(); 1975 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1976 return(0); 1977 } 1978 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1979 periph->refcount++; 1980 } else 1981 periph = NULL; 1982 xpt_unlock_buses(); 1983 1984 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1985 } 1986 1987 static int 1988 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1989 { 1990 struct ccb_dev_match *cdm; 1991 dev_match_ret retval; 1992 1993 cdm = (struct ccb_dev_match *)arg; 1994 1995 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1996 1997 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1998 cdm->status = CAM_DEV_MATCH_ERROR; 1999 return(0); 2000 } 2001 2002 /* 2003 * If the copy flag is set, copy this peripheral out. 2004 */ 2005 if (retval & DM_RET_COPY) { 2006 int spaceleft, j; 2007 size_t l; 2008 2009 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2010 sizeof(struct dev_match_result)); 2011 2012 /* 2013 * If we don't have enough space to put in another 2014 * match result, save our position and tell the 2015 * user there are more devices to check. 2016 */ 2017 if (spaceleft < sizeof(struct dev_match_result)) { 2018 struct periph_driver **pdrv; 2019 2020 pdrv = NULL; 2021 bzero(&cdm->pos, sizeof(cdm->pos)); 2022 cdm->pos.position_type = 2023 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2024 CAM_DEV_POS_PERIPH; 2025 2026 /* 2027 * This may look a bit non-sensical, but it is 2028 * actually quite logical. There are very few 2029 * peripheral drivers, and bloating every peripheral 2030 * structure with a pointer back to its parent 2031 * peripheral driver linker set entry would cost 2032 * more in the long run than doing this quick lookup. 2033 */ 2034 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2035 if (strcmp((*pdrv)->driver_name, 2036 periph->periph_name) == 0) 2037 break; 2038 } 2039 2040 if (*pdrv == NULL) { 2041 cdm->status = CAM_DEV_MATCH_ERROR; 2042 return(0); 2043 } 2044 2045 cdm->pos.cookie.pdrv = pdrv; 2046 /* 2047 * The periph generation slot does double duty, as 2048 * does the periph pointer slot. They are used for 2049 * both edt and pdrv lookups and positioning. 2050 */ 2051 cdm->pos.cookie.periph = periph; 2052 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2053 (*pdrv)->generation; 2054 cdm->status = CAM_DEV_MATCH_MORE; 2055 return(0); 2056 } 2057 2058 j = cdm->num_matches; 2059 cdm->num_matches++; 2060 cdm->matches[j].type = DEV_MATCH_PERIPH; 2061 cdm->matches[j].result.periph_result.path_id = 2062 periph->path->bus->path_id; 2063 2064 /* 2065 * The transport layer peripheral doesn't have a target or 2066 * lun. 
2067 */ 2068 if (periph->path->target) 2069 cdm->matches[j].result.periph_result.target_id = 2070 periph->path->target->target_id; 2071 else 2072 cdm->matches[j].result.periph_result.target_id = 2073 CAM_TARGET_WILDCARD; 2074 2075 if (periph->path->device) 2076 cdm->matches[j].result.periph_result.target_lun = 2077 periph->path->device->lun_id; 2078 else 2079 cdm->matches[j].result.periph_result.target_lun = 2080 CAM_LUN_WILDCARD; 2081 2082 cdm->matches[j].result.periph_result.unit_number = 2083 periph->unit_number; 2084 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 2085 strlcpy(cdm->matches[j].result.periph_result.periph_name, 2086 periph->periph_name, l); 2087 } 2088 2089 return(1); 2090 } 2091 2092 static int 2093 xptperiphlistmatch(struct ccb_dev_match *cdm) 2094 { 2095 int ret; 2096 2097 cdm->num_matches = 0; 2098 2099 /* 2100 * At this point in the edt traversal function, we check the bus 2101 * list generation to make sure that no buses have been added or 2102 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2103 * For the peripheral driver list traversal function, however, we 2104 * don't have to worry about new peripheral driver types coming or 2105 * going; they're in a linker set, and therefore can't change 2106 * without a recompile. 2107 */ 2108 2109 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2110 && (cdm->pos.cookie.pdrv != NULL)) 2111 ret = xptpdrvtraverse( 2112 (struct periph_driver **)cdm->pos.cookie.pdrv, 2113 xptplistpdrvfunc, cdm); 2114 else 2115 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2116 2117 /* 2118 * If we get back 0, that means that we had to stop before fully 2119 * traversing the peripheral driver tree. It also means that one of 2120 * the subroutines has set the status field to the proper value. If 2121 * we get back 1, we've fully traversed the EDT and copied out any 2122 * matching entries. 
2123 */ 2124 if (ret == 1) 2125 cdm->status = CAM_DEV_MATCH_LAST; 2126 2127 return(ret); 2128 } 2129 2130 static int 2131 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2132 { 2133 struct cam_eb *bus, *next_bus; 2134 int retval; 2135 2136 retval = 1; 2137 if (start_bus) 2138 bus = start_bus; 2139 else { 2140 xpt_lock_buses(); 2141 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2142 if (bus == NULL) { 2143 xpt_unlock_buses(); 2144 return (retval); 2145 } 2146 bus->refcount++; 2147 xpt_unlock_buses(); 2148 } 2149 for (; bus != NULL; bus = next_bus) { 2150 retval = tr_func(bus, arg); 2151 if (retval == 0) { 2152 xpt_release_bus(bus); 2153 break; 2154 } 2155 xpt_lock_buses(); 2156 next_bus = TAILQ_NEXT(bus, links); 2157 if (next_bus) 2158 next_bus->refcount++; 2159 xpt_unlock_buses(); 2160 xpt_release_bus(bus); 2161 } 2162 return(retval); 2163 } 2164 2165 static int 2166 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2167 xpt_targetfunc_t *tr_func, void *arg) 2168 { 2169 struct cam_et *target, *next_target; 2170 int retval; 2171 2172 retval = 1; 2173 if (start_target) 2174 target = start_target; 2175 else { 2176 mtx_lock(&bus->eb_mtx); 2177 target = TAILQ_FIRST(&bus->et_entries); 2178 if (target == NULL) { 2179 mtx_unlock(&bus->eb_mtx); 2180 return (retval); 2181 } 2182 target->refcount++; 2183 mtx_unlock(&bus->eb_mtx); 2184 } 2185 for (; target != NULL; target = next_target) { 2186 retval = tr_func(target, arg); 2187 if (retval == 0) { 2188 xpt_release_target(target); 2189 break; 2190 } 2191 mtx_lock(&bus->eb_mtx); 2192 next_target = TAILQ_NEXT(target, links); 2193 if (next_target) 2194 next_target->refcount++; 2195 mtx_unlock(&bus->eb_mtx); 2196 xpt_release_target(target); 2197 } 2198 return(retval); 2199 } 2200 2201 static int 2202 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2203 xpt_devicefunc_t *tr_func, void *arg) 2204 { 2205 struct cam_eb *bus; 2206 struct cam_ed *device, *next_device; 2207 int retval; 2208 2209 retval = 1; 2210 bus = target->bus; 2211 if (start_device) 2212 device = start_device; 2213 else { 2214 mtx_lock(&bus->eb_mtx); 2215 device = TAILQ_FIRST(&target->ed_entries); 2216 if (device == NULL) { 2217 mtx_unlock(&bus->eb_mtx); 2218 return (retval); 2219 } 2220 device->refcount++; 2221 mtx_unlock(&bus->eb_mtx); 2222 } 2223 for (; device != NULL; device = next_device) { 2224 mtx_lock(&device->device_mtx); 2225 retval = tr_func(device, arg); 2226 mtx_unlock(&device->device_mtx); 2227 if (retval == 0) { 2228 xpt_release_device(device); 2229 break; 2230 } 2231 mtx_lock(&bus->eb_mtx); 2232 next_device = TAILQ_NEXT(device, links); 2233 if (next_device) 2234 next_device->refcount++; 2235 mtx_unlock(&bus->eb_mtx); 2236 xpt_release_device(device); 2237 } 2238 return(retval); 2239 } 2240 2241 static int 2242 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2243 xpt_periphfunc_t *tr_func, void *arg) 2244 { 2245 struct cam_eb *bus; 2246 struct cam_periph *periph, *next_periph; 2247 int retval; 2248 2249 retval = 1; 2250 2251 bus = device->target->bus; 2252 if (start_periph) 2253 periph = start_periph; 2254 else { 2255 xpt_lock_buses(); 2256 mtx_lock(&bus->eb_mtx); 2257 periph = SLIST_FIRST(&device->periphs); 2258 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2259 periph = SLIST_NEXT(periph, periph_links); 2260 if (periph == NULL) { 2261 mtx_unlock(&bus->eb_mtx); 2262 xpt_unlock_buses(); 2263 return (retval); 2264 } 2265 periph->refcount++; 2266 mtx_unlock(&bus->eb_mtx); 2267 
xpt_unlock_buses(); 2268 } 2269 for (; periph != NULL; periph = next_periph) { 2270 retval = tr_func(periph, arg); 2271 if (retval == 0) { 2272 cam_periph_release_locked(periph); 2273 break; 2274 } 2275 xpt_lock_buses(); 2276 mtx_lock(&bus->eb_mtx); 2277 next_periph = SLIST_NEXT(periph, periph_links); 2278 while (next_periph != NULL && 2279 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2280 next_periph = SLIST_NEXT(next_periph, periph_links); 2281 if (next_periph) 2282 next_periph->refcount++; 2283 mtx_unlock(&bus->eb_mtx); 2284 xpt_unlock_buses(); 2285 cam_periph_release_locked(periph); 2286 } 2287 return(retval); 2288 } 2289 2290 static int 2291 xptpdrvtraverse(struct periph_driver **start_pdrv, 2292 xpt_pdrvfunc_t *tr_func, void *arg) 2293 { 2294 struct periph_driver **pdrv; 2295 int retval; 2296 2297 retval = 1; 2298 2299 /* 2300 * We don't traverse the peripheral driver list like we do the 2301 * other lists, because it is a linker set, and therefore cannot be 2302 * changed during runtime. If the peripheral driver list is ever 2303 * re-done to be something other than a linker set (i.e. it can 2304 * change while the system is running), the list traversal should 2305 * be modified to work like the other traversal functions. 2306 */ 2307 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2308 *pdrv != NULL; pdrv++) { 2309 retval = tr_func(pdrv, arg); 2310 2311 if (retval == 0) 2312 return(retval); 2313 } 2314 2315 return(retval); 2316 } 2317 2318 static int 2319 xptpdperiphtraverse(struct periph_driver **pdrv, 2320 struct cam_periph *start_periph, 2321 xpt_periphfunc_t *tr_func, void *arg) 2322 { 2323 struct cam_periph *periph, *next_periph; 2324 int retval; 2325 2326 retval = 1; 2327 2328 if (start_periph) 2329 periph = start_periph; 2330 else { 2331 xpt_lock_buses(); 2332 periph = TAILQ_FIRST(&(*pdrv)->units); 2333 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2334 periph = TAILQ_NEXT(periph, unit_links); 2335 if (periph == NULL) { 2336 xpt_unlock_buses(); 2337 return (retval); 2338 } 2339 periph->refcount++; 2340 xpt_unlock_buses(); 2341 } 2342 for (; periph != NULL; periph = next_periph) { 2343 cam_periph_lock(periph); 2344 retval = tr_func(periph, arg); 2345 cam_periph_unlock(periph); 2346 if (retval == 0) { 2347 cam_periph_release(periph); 2348 break; 2349 } 2350 xpt_lock_buses(); 2351 next_periph = TAILQ_NEXT(periph, unit_links); 2352 while (next_periph != NULL && 2353 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2354 next_periph = TAILQ_NEXT(next_periph, unit_links); 2355 if (next_periph) 2356 next_periph->refcount++; 2357 xpt_unlock_buses(); 2358 cam_periph_release(periph); 2359 } 2360 return(retval); 2361 } 2362 2363 static int 2364 xptdefbusfunc(struct cam_eb *bus, void *arg) 2365 { 2366 struct xpt_traverse_config *tr_config; 2367 2368 tr_config = (struct xpt_traverse_config *)arg; 2369 2370 if (tr_config->depth == XPT_DEPTH_BUS) { 2371 xpt_busfunc_t *tr_func; 2372 2373 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2374 2375 return(tr_func(bus, tr_config->tr_arg)); 2376 } else 2377 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2378 } 2379 2380 static int 2381 xptdeftargetfunc(struct cam_et *target, void *arg) 2382 { 2383 struct xpt_traverse_config *tr_config; 2384 2385 tr_config = (struct xpt_traverse_config *)arg; 2386 2387 if (tr_config->depth == XPT_DEPTH_TARGET) { 2388 xpt_targetfunc_t *tr_func; 2389 2390 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2391 2392 return(tr_func(target, tr_config->tr_arg)); 2393 } else 2394 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2395 } 2396 2397 static int 2398 xptdefdevicefunc(struct cam_ed *device, void *arg) 2399 { 2400 struct xpt_traverse_config *tr_config; 2401 2402 tr_config = (struct xpt_traverse_config *)arg; 2403 2404 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2405 xpt_devicefunc_t *tr_func; 2406 2407 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2408 2409 return(tr_func(device, tr_config->tr_arg)); 2410 } else 2411 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2412 } 2413 2414 static int 2415 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2416 { 2417 struct xpt_traverse_config *tr_config; 2418 xpt_periphfunc_t *tr_func; 2419 2420 tr_config = (struct xpt_traverse_config *)arg; 2421 2422 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2423 2424 /* 2425 * Unlike the other default functions, we don't check for depth 2426 * here. The peripheral driver level is the last level in the EDT, 2427 * so if we're here, we should execute the function in question. 2428 */ 2429 return(tr_func(periph, tr_config->tr_arg)); 2430 } 2431 2432 /* 2433 * Execute the given function for every bus in the EDT. 2434 */ 2435 static int 2436 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2437 { 2438 struct xpt_traverse_config tr_config; 2439 2440 tr_config.depth = XPT_DEPTH_BUS; 2441 tr_config.tr_func = tr_func; 2442 tr_config.tr_arg = arg; 2443 2444 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2445 } 2446 2447 /* 2448 * Execute the given function for every device in the EDT. 2449 */ 2450 static int 2451 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2452 { 2453 struct xpt_traverse_config tr_config; 2454 2455 tr_config.depth = XPT_DEPTH_DEVICE; 2456 tr_config.tr_func = tr_func; 2457 tr_config.tr_arg = arg; 2458 2459 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2460 } 2461 2462 static int 2463 xptsetasyncfunc(struct cam_ed *device, void *arg) 2464 { 2465 struct cam_path path; 2466 struct ccb_getdev cgd; 2467 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2468 2469 /* 2470 * Don't report unconfigured devices (Wildcard devs, 2471 * devices only for target mode, device instances 2472 * that have been invalidated but are waiting for 2473 * their last reference count to be released). 
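 *
 * This is the replay half of async registration: when a client asks for
 * AC_FOUND_DEVICE, xpt_register_async() walks every existing device
 * through this function so the client also hears about devices that
 * were discovered before it registered.  A rough sketch of the client
 * side (foo_async and softc are hypothetical; foo_async has the
 * ac_callback_t signature):
 *
 *   status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *       foo_async, softc, NULL);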
2474 */ 2475 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2476 return (1); 2477 2478 memset(&cgd, 0, sizeof(cgd)); 2479 xpt_compile_path(&path, 2480 NULL, 2481 device->target->bus->path_id, 2482 device->target->target_id, 2483 device->lun_id); 2484 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2485 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2486 xpt_action((union ccb *)&cgd); 2487 csa->callback(csa->callback_arg, 2488 AC_FOUND_DEVICE, 2489 &path, &cgd); 2490 xpt_release_path(&path); 2491 2492 return(1); 2493 } 2494 2495 static int 2496 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2497 { 2498 struct cam_path path; 2499 struct ccb_pathinq cpi; 2500 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2501 2502 xpt_compile_path(&path, /*periph*/NULL, 2503 bus->path_id, 2504 CAM_TARGET_WILDCARD, 2505 CAM_LUN_WILDCARD); 2506 xpt_path_lock(&path); 2507 xpt_path_inq(&cpi, &path); 2508 csa->callback(csa->callback_arg, 2509 AC_PATH_REGISTERED, 2510 &path, &cpi); 2511 xpt_path_unlock(&path); 2512 xpt_release_path(&path); 2513 2514 return(1); 2515 } 2516 2517 void 2518 xpt_action(union ccb *start_ccb) 2519 { 2520 2521 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2522 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2523 xpt_action_name(start_ccb->ccb_h.func_code))); 2524 2525 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2526 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2527 } 2528 2529 void 2530 xpt_action_default(union ccb *start_ccb) 2531 { 2532 struct cam_path *path; 2533 struct cam_sim *sim; 2534 struct mtx *mtx; 2535 2536 path = start_ccb->ccb_h.path; 2537 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2538 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2539 xpt_action_name(start_ccb->ccb_h.func_code))); 2540 2541 switch (start_ccb->ccb_h.func_code) { 2542 case XPT_SCSI_IO: 2543 { 2544 struct cam_ed *device; 2545 2546 /* 2547 * For the sake of compatibility with SCSI-1 2548 * devices that may not understand the identify 2549 * message, we include lun information in the 2550 * second byte of all commands. SCSI-1 specifies 2551 * that luns are a 3 bit value and reserves only 3 2552 * bits for lun information in the CDB. Later 2553 * revisions of the SCSI spec allow for more than 8 2554 * luns, but have deprecated lun information in the 2555 * CDB. So, if the lun won't fit, we must omit. 2556 * 2557 * Also be aware that during initial probing for devices, 2558 * the inquiry information is unknown but initialized to 0. 2559 * This means that this code will be exercised while probing 2560 * devices with an ANSI revision greater than 2. 
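 *
 * Concretely: for a pre-SCSI-3 device at LUN 2, the code below ORs
 * (2 << 5) == 0x40 into byte 1 of the CDB, so a 6-byte READ to that LUN
 * carries the LUN in the upper three bits of its second byte.  A LUN of
 * 8 or greater cannot be represented in three bits, so it is simply not
 * encoded.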
2561 */ 2562 device = path->device; 2563 if (device->protocol_version <= SCSI_REV_2 2564 && start_ccb->ccb_h.target_lun < 8 2565 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2566 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2567 start_ccb->ccb_h.target_lun << 5; 2568 } 2569 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2570 } 2571 /* FALLTHROUGH */ 2572 case XPT_TARGET_IO: 2573 case XPT_CONT_TARGET_IO: 2574 start_ccb->csio.sense_resid = 0; 2575 start_ccb->csio.resid = 0; 2576 /* FALLTHROUGH */ 2577 case XPT_ATA_IO: 2578 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2579 start_ccb->ataio.resid = 0; 2580 /* FALLTHROUGH */ 2581 case XPT_NVME_IO: 2582 case XPT_NVME_ADMIN: 2583 case XPT_MMC_IO: 2584 case XPT_MMC_GET_TRAN_SETTINGS: 2585 case XPT_MMC_SET_TRAN_SETTINGS: 2586 case XPT_RESET_DEV: 2587 case XPT_ENG_EXEC: 2588 case XPT_SMP_IO: 2589 { 2590 struct cam_devq *devq; 2591 2592 devq = path->bus->sim->devq; 2593 mtx_lock(&devq->send_mtx); 2594 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2595 if (xpt_schedule_devq(devq, path->device) != 0) 2596 xpt_run_devq(devq); 2597 mtx_unlock(&devq->send_mtx); 2598 break; 2599 } 2600 case XPT_CALC_GEOMETRY: 2601 /* Filter out garbage */ 2602 if (start_ccb->ccg.block_size == 0 2603 || start_ccb->ccg.volume_size == 0) { 2604 start_ccb->ccg.cylinders = 0; 2605 start_ccb->ccg.heads = 0; 2606 start_ccb->ccg.secs_per_track = 0; 2607 start_ccb->ccb_h.status = CAM_REQ_CMP; 2608 break; 2609 } 2610 goto call_sim; 2611 case XPT_ABORT: 2612 { 2613 union ccb* abort_ccb; 2614 2615 abort_ccb = start_ccb->cab.abort_ccb; 2616 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2617 struct cam_ed *device; 2618 struct cam_devq *devq; 2619 2620 device = abort_ccb->ccb_h.path->device; 2621 devq = device->sim->devq; 2622 2623 mtx_lock(&devq->send_mtx); 2624 if (abort_ccb->ccb_h.pinfo.index > 0) { 2625 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2626 abort_ccb->ccb_h.status = 2627 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2628 xpt_freeze_devq_device(device, 1); 2629 mtx_unlock(&devq->send_mtx); 2630 xpt_done(abort_ccb); 2631 start_ccb->ccb_h.status = CAM_REQ_CMP; 2632 break; 2633 } 2634 mtx_unlock(&devq->send_mtx); 2635 2636 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2637 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2638 /* 2639 * We've caught this ccb en route to 2640 * the SIM. Flag it for abort and the 2641 * SIM will do so just before starting 2642 * real work on the CCB. 2643 */ 2644 abort_ccb->ccb_h.status = 2645 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2646 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2647 start_ccb->ccb_h.status = CAM_REQ_CMP; 2648 break; 2649 } 2650 } 2651 if (XPT_FC_IS_QUEUED(abort_ccb) 2652 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2653 /* 2654 * It's already completed but waiting 2655 * for our SWI to get to it. 2656 */ 2657 start_ccb->ccb_h.status = CAM_UA_ABORT; 2658 break; 2659 } 2660 /* 2661 * If we weren't able to take care of the abort request 2662 * in the XPT, pass the request down to the SIM for processing. 
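 *
 * A rough sketch of how a peripheral driver issues the abort in the
 * first place (pending_ccb stands for the caller's outstanding CCB;
 * this is an illustration of the interface above, not code taken from a
 * particular driver):
 *
 *   union ccb ccb;
 *
 *   memset(&ccb, 0, sizeof(ccb));
 *   xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
 *   ccb.ccb_h.func_code = XPT_ABORT;
 *   ccb.cab.abort_ccb = pending_ccb;
 *   xpt_action(&ccb);
 *   // On success the aborted CCB completes with CAM_REQ_ABORTED.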
2663 */ 2664 } 2665 /* FALLTHROUGH */ 2666 case XPT_ACCEPT_TARGET_IO: 2667 case XPT_EN_LUN: 2668 case XPT_IMMED_NOTIFY: 2669 case XPT_NOTIFY_ACK: 2670 case XPT_RESET_BUS: 2671 case XPT_IMMEDIATE_NOTIFY: 2672 case XPT_NOTIFY_ACKNOWLEDGE: 2673 case XPT_GET_SIM_KNOB_OLD: 2674 case XPT_GET_SIM_KNOB: 2675 case XPT_SET_SIM_KNOB: 2676 case XPT_GET_TRAN_SETTINGS: 2677 case XPT_SET_TRAN_SETTINGS: 2678 case XPT_PATH_INQ: 2679 call_sim: 2680 sim = path->bus->sim; 2681 mtx = sim->mtx; 2682 if (mtx && !mtx_owned(mtx)) 2683 mtx_lock(mtx); 2684 else 2685 mtx = NULL; 2686 2687 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2688 ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); 2689 (*(sim->sim_action))(sim, start_ccb); 2690 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2691 ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); 2692 if (mtx) 2693 mtx_unlock(mtx); 2694 break; 2695 case XPT_PATH_STATS: 2696 start_ccb->cpis.last_reset = path->bus->last_reset; 2697 start_ccb->ccb_h.status = CAM_REQ_CMP; 2698 break; 2699 case XPT_GDEV_TYPE: 2700 { 2701 struct cam_ed *dev; 2702 2703 dev = path->device; 2704 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2705 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2706 } else { 2707 struct ccb_getdev *cgd; 2708 2709 cgd = &start_ccb->cgd; 2710 cgd->protocol = dev->protocol; 2711 cgd->inq_data = dev->inq_data; 2712 cgd->ident_data = dev->ident_data; 2713 cgd->inq_flags = dev->inq_flags; 2714 cgd->ccb_h.status = CAM_REQ_CMP; 2715 cgd->serial_num_len = dev->serial_num_len; 2716 if ((dev->serial_num_len > 0) 2717 && (dev->serial_num != NULL)) 2718 bcopy(dev->serial_num, cgd->serial_num, 2719 dev->serial_num_len); 2720 } 2721 break; 2722 } 2723 case XPT_GDEV_STATS: 2724 { 2725 struct ccb_getdevstats *cgds = &start_ccb->cgds; 2726 struct cam_ed *dev = path->device; 2727 struct cam_eb *bus = path->bus; 2728 struct cam_et *tar = path->target; 2729 struct cam_devq *devq = bus->sim->devq; 2730 2731 mtx_lock(&devq->send_mtx); 2732 cgds->dev_openings = dev->ccbq.dev_openings; 2733 cgds->dev_active = dev->ccbq.dev_active; 2734 cgds->allocated = dev->ccbq.allocated; 2735 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2736 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; 2737 cgds->last_reset = tar->last_reset; 2738 cgds->maxtags = dev->maxtags; 2739 cgds->mintags = dev->mintags; 2740 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2741 cgds->last_reset = bus->last_reset; 2742 mtx_unlock(&devq->send_mtx); 2743 cgds->ccb_h.status = CAM_REQ_CMP; 2744 break; 2745 } 2746 case XPT_GDEVLIST: 2747 { 2748 struct cam_periph *nperiph; 2749 struct periph_list *periph_head; 2750 struct ccb_getdevlist *cgdl; 2751 u_int i; 2752 struct cam_ed *device; 2753 bool found; 2754 2755 found = false; 2756 2757 /* 2758 * Don't want anyone mucking with our data. 2759 */ 2760 device = path->device; 2761 periph_head = &device->periphs; 2762 cgdl = &start_ccb->cgdl; 2763 2764 /* 2765 * Check and see if the list has changed since the user 2766 * last requested a list member. If so, tell them that the 2767 * list has changed, and therefore they need to start over 2768 * from the beginning. 2769 */ 2770 if ((cgdl->index != 0) && 2771 (cgdl->generation != device->generation)) { 2772 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2773 break; 2774 } 2775 2776 /* 2777 * Traverse the list of peripherals and attempt to find 2778 * the requested peripheral. 
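 *
 * The consumer walks the list one entry per call: start with index and
 * generation of zero, re-issue while the status comes back
 * CAM_GDEVLIST_MORE_DEVS, and start again from zero on
 * CAM_GDEVLIST_LIST_CHANGED.  A minimal sketch against an already-built
 * path (error handling omitted; illustration only):
 *
 *   struct ccb_getdevlist cgdl;
 *
 *   xpt_setup_ccb(&cgdl.ccb_h, path, CAM_PRIORITY_NORMAL);
 *   cgdl.ccb_h.func_code = XPT_GDEVLIST;
 *   cgdl.index = 0;
 *   cgdl.generation = 0;
 *   do {
 *           xpt_action((union ccb *)&cgdl);
 *           // cgdl.periph_name / cgdl.unit_number describe one periph
 *   } while (cgdl.status == CAM_GDEVLIST_MORE_DEVS);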
2779 */ 2780 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2781 (nperiph != NULL) && (i <= cgdl->index); 2782 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2783 if (i == cgdl->index) { 2784 strlcpy(cgdl->periph_name, 2785 nperiph->periph_name, 2786 sizeof(cgdl->periph_name)); 2787 cgdl->unit_number = nperiph->unit_number; 2788 found = true; 2789 } 2790 } 2791 if (!found) { 2792 cgdl->status = CAM_GDEVLIST_ERROR; 2793 break; 2794 } 2795 2796 if (nperiph == NULL) 2797 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2798 else 2799 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2800 2801 cgdl->index++; 2802 cgdl->generation = device->generation; 2803 2804 cgdl->ccb_h.status = CAM_REQ_CMP; 2805 break; 2806 } 2807 case XPT_DEV_MATCH: 2808 { 2809 dev_pos_type position_type; 2810 struct ccb_dev_match *cdm; 2811 2812 cdm = &start_ccb->cdm; 2813 2814 /* 2815 * There are two ways of getting at information in the EDT. 2816 * The first way is via the primary EDT tree. It starts 2817 * with a list of buses, then a list of targets on a bus, 2818 * then devices/luns on a target, and then peripherals on a 2819 * device/lun. The "other" way is by the peripheral driver 2820 * lists. The peripheral driver lists are organized by 2821 * peripheral driver. (obviously) So it makes sense to 2822 * use the peripheral driver list if the user is looking 2823 * for something like "da1", or all "da" devices. If the 2824 * user is looking for something on a particular bus/target 2825 * or lun, it's generally better to go through the EDT tree. 2826 */ 2827 2828 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2829 position_type = cdm->pos.position_type; 2830 else { 2831 u_int i; 2832 2833 position_type = CAM_DEV_POS_NONE; 2834 2835 for (i = 0; i < cdm->num_patterns; i++) { 2836 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2837 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2838 position_type = CAM_DEV_POS_EDT; 2839 break; 2840 } 2841 } 2842 2843 if (cdm->num_patterns == 0) 2844 position_type = CAM_DEV_POS_EDT; 2845 else if (position_type == CAM_DEV_POS_NONE) 2846 position_type = CAM_DEV_POS_PDRV; 2847 } 2848 2849 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2850 case CAM_DEV_POS_EDT: 2851 xptedtmatch(cdm); 2852 break; 2853 case CAM_DEV_POS_PDRV: 2854 xptperiphlistmatch(cdm); 2855 break; 2856 default: 2857 cdm->status = CAM_DEV_MATCH_ERROR; 2858 break; 2859 } 2860 2861 if (cdm->status == CAM_DEV_MATCH_ERROR) 2862 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2863 else 2864 start_ccb->ccb_h.status = CAM_REQ_CMP; 2865 2866 break; 2867 } 2868 case XPT_SASYNC_CB: 2869 { 2870 struct ccb_setasync *csa; 2871 struct async_node *cur_entry; 2872 struct async_list *async_head; 2873 uint32_t added; 2874 2875 csa = &start_ccb->csa; 2876 added = csa->event_enable; 2877 async_head = &path->device->asyncs; 2878 2879 /* 2880 * If there is already an entry for us, simply 2881 * update it. 2882 */ 2883 cur_entry = SLIST_FIRST(async_head); 2884 while (cur_entry != NULL) { 2885 if ((cur_entry->callback_arg == csa->callback_arg) 2886 && (cur_entry->callback == csa->callback)) 2887 break; 2888 cur_entry = SLIST_NEXT(cur_entry, links); 2889 } 2890 2891 if (cur_entry != NULL) { 2892 /* 2893 * If the request has no flags set, 2894 * remove the entry. 
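 *
 * This is also how clients deregister: issuing XPT_SASYNC_CB with an
 * event_enable of zero and the same callback/callback_arg pair drops
 * the entry, e.g. through the usual wrapper (foo_async and softc being
 * whatever was originally registered):
 *
 *   xpt_register_async(0, foo_async, softc, path);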
2895 */ 2896 added &= ~cur_entry->event_enable; 2897 if (csa->event_enable == 0) { 2898 SLIST_REMOVE(async_head, cur_entry, 2899 async_node, links); 2900 xpt_release_device(path->device); 2901 free(cur_entry, M_CAMXPT); 2902 } else { 2903 cur_entry->event_enable = csa->event_enable; 2904 } 2905 csa->event_enable = added; 2906 } else { 2907 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2908 M_NOWAIT); 2909 if (cur_entry == NULL) { 2910 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2911 break; 2912 } 2913 cur_entry->event_enable = csa->event_enable; 2914 cur_entry->event_lock = (path->bus->sim->mtx && 2915 mtx_owned(path->bus->sim->mtx)) ? 1 : 0; 2916 cur_entry->callback_arg = csa->callback_arg; 2917 cur_entry->callback = csa->callback; 2918 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2919 xpt_acquire_device(path->device); 2920 } 2921 start_ccb->ccb_h.status = CAM_REQ_CMP; 2922 break; 2923 } 2924 case XPT_REL_SIMQ: 2925 { 2926 struct ccb_relsim *crs; 2927 struct cam_ed *dev; 2928 2929 crs = &start_ccb->crs; 2930 dev = path->device; 2931 if (dev == NULL) { 2932 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2933 break; 2934 } 2935 2936 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2937 /* Don't ever go below one opening */ 2938 if (crs->openings > 0) { 2939 xpt_dev_ccbq_resize(path, crs->openings); 2940 if (bootverbose) { 2941 xpt_print(path, 2942 "number of openings is now %d\n", 2943 crs->openings); 2944 } 2945 } 2946 } 2947 2948 mtx_lock(&dev->sim->devq->send_mtx); 2949 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2950 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2951 /* 2952 * Just extend the old timeout and decrement 2953 * the freeze count so that a single timeout 2954 * is sufficient for releasing the queue. 2955 */ 2956 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2957 callout_stop(&dev->callout); 2958 } else { 2959 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2960 } 2961 2962 callout_reset_sbt(&dev->callout, 2963 SBT_1MS * crs->release_timeout, SBT_1MS, 2964 xpt_release_devq_timeout, dev, 0); 2965 2966 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2967 } 2968 2969 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2970 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2971 /* 2972 * Decrement the freeze count so that a single 2973 * completion is still sufficient to unfreeze 2974 * the queue. 2975 */ 2976 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2977 } else { 2978 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2979 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2980 } 2981 } 2982 2983 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2984 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2985 || (dev->ccbq.dev_active == 0)) { 2986 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2987 } else { 2988 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2989 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2990 } 2991 } 2992 mtx_unlock(&dev->sim->devq->send_mtx); 2993 2994 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2995 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2996 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2997 start_ccb->ccb_h.status = CAM_REQ_CMP; 2998 break; 2999 } 3000 case XPT_DEBUG: { 3001 struct cam_path *oldpath; 3002 3003 /* Check that all request bits are supported. 
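 *
 * Which bits are usable is a compile-time property: CAM_DEBUG_COMPILE
 * (cam_debug.h) is shaped by kernel options such as CAMDEBUG and
 * CAM_DEBUG_FLAGS, and asking for a flag that was not compiled in fails
 * here with CAM_FUNC_NOTAVAIL.  From userland this case is normally
 * reached through camcontrol(8), e.g. "camcontrol debug -I -c da0" to
 * enable tracing on a device and "camcontrol debug off" to clear
 * cam_dpath again.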
*/ 3004 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3005 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3006 break; 3007 } 3008 3009 cam_dflags = CAM_DEBUG_NONE; 3010 if (cam_dpath != NULL) { 3011 oldpath = cam_dpath; 3012 cam_dpath = NULL; 3013 xpt_free_path(oldpath); 3014 } 3015 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3016 if (xpt_create_path(&cam_dpath, NULL, 3017 start_ccb->ccb_h.path_id, 3018 start_ccb->ccb_h.target_id, 3019 start_ccb->ccb_h.target_lun) != 3020 CAM_REQ_CMP) { 3021 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3022 } else { 3023 cam_dflags = start_ccb->cdbg.flags; 3024 start_ccb->ccb_h.status = CAM_REQ_CMP; 3025 xpt_print(cam_dpath, "debugging flags now %x\n", 3026 cam_dflags); 3027 } 3028 } else 3029 start_ccb->ccb_h.status = CAM_REQ_CMP; 3030 break; 3031 } 3032 case XPT_NOOP: 3033 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3034 xpt_freeze_devq(path, 1); 3035 start_ccb->ccb_h.status = CAM_REQ_CMP; 3036 break; 3037 case XPT_REPROBE_LUN: 3038 xpt_async(AC_INQ_CHANGED, path, NULL); 3039 start_ccb->ccb_h.status = CAM_REQ_CMP; 3040 xpt_done(start_ccb); 3041 break; 3042 case XPT_ASYNC: 3043 /* 3044 * Queue the async operation so it can be run from a sleepable 3045 * context. 3046 */ 3047 start_ccb->ccb_h.status = CAM_REQ_CMP; 3048 mtx_lock(&cam_async.cam_doneq_mtx); 3049 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe); 3050 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX; 3051 mtx_unlock(&cam_async.cam_doneq_mtx); 3052 wakeup(&cam_async.cam_doneq); 3053 break; 3054 default: 3055 case XPT_SDEV_TYPE: 3056 case XPT_TERM_IO: 3057 case XPT_ENG_INQ: 3058 /* XXX Implement */ 3059 xpt_print(start_ccb->ccb_h.path, 3060 "%s: CCB type %#x %s not supported\n", __func__, 3061 start_ccb->ccb_h.func_code, 3062 xpt_action_name(start_ccb->ccb_h.func_code)); 3063 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3064 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3065 xpt_done(start_ccb); 3066 } 3067 break; 3068 } 3069 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3070 ("xpt_action_default: func= %#x %s status %#x\n", 3071 start_ccb->ccb_h.func_code, 3072 xpt_action_name(start_ccb->ccb_h.func_code), 3073 start_ccb->ccb_h.status)); 3074 } 3075 3076 /* 3077 * Call the sim poll routine to allow the sim to complete 3078 * any inflight requests, then call camisr_runqueue to 3079 * complete any CCB that the polling completed. 3080 */ 3081 void 3082 xpt_sim_poll(struct cam_sim *sim) 3083 { 3084 struct mtx *mtx; 3085 3086 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3087 mtx = sim->mtx; 3088 if (mtx) 3089 mtx_lock(mtx); 3090 (*(sim->sim_poll))(sim); 3091 if (mtx) 3092 mtx_unlock(mtx); 3093 camisr_runqueue(); 3094 } 3095 3096 uint32_t 3097 xpt_poll_setup(union ccb *start_ccb) 3098 { 3099 uint32_t timeout; 3100 struct cam_sim *sim; 3101 struct cam_devq *devq; 3102 struct cam_ed *dev; 3103 3104 timeout = start_ccb->ccb_h.timeout * 10; 3105 sim = start_ccb->ccb_h.path->bus->sim; 3106 devq = sim->devq; 3107 dev = start_ccb->ccb_h.path->device; 3108 3109 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3110 3111 /* 3112 * Steal an opening so that no other queued requests 3113 * can get it before us while we simulate interrupts. 
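 *
 * This polled path exists for contexts that cannot take interrupts or
 * sleep, such as crash dumps and early boot.  The conventional calling
 * sequence (roughly what cam_periph_runccb() does when it must poll) is:
 *
 *   timeout = xpt_poll_setup(ccb);
 *   if (timeout > 0) {
 *           xpt_action(ccb);
 *           xpt_pollwait(ccb, timeout);
 *   } else
 *           ccb->ccb_h.status = CAM_RESRC_UNAVAIL;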
3114 */ 3115 mtx_lock(&devq->send_mtx); 3116 dev->ccbq.dev_openings--; 3117 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3118 (--timeout > 0)) { 3119 mtx_unlock(&devq->send_mtx); 3120 DELAY(100); 3121 xpt_sim_poll(sim); 3122 mtx_lock(&devq->send_mtx); 3123 } 3124 dev->ccbq.dev_openings++; 3125 mtx_unlock(&devq->send_mtx); 3126 3127 return (timeout); 3128 } 3129 3130 void 3131 xpt_pollwait(union ccb *start_ccb, uint32_t timeout) 3132 { 3133 3134 KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim), 3135 ("%s: non-pollable sim", __func__)); 3136 while (--timeout > 0) { 3137 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); 3138 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3139 != CAM_REQ_INPROG) 3140 break; 3141 DELAY(100); 3142 } 3143 3144 if (timeout == 0) { 3145 /* 3146 * XXX Is it worth adding a sim_timeout entry 3147 * point so we can attempt recovery? If 3148 * this is only used for dumps, I don't think 3149 * it is. 3150 */ 3151 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3152 } 3153 } 3154 3155 /* 3156 * Schedule a peripheral driver to receive a ccb when its 3157 * target device has space for more transactions. 3158 */ 3159 void 3160 xpt_schedule(struct cam_periph *periph, uint32_t new_priority) 3161 { 3162 3163 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3164 cam_periph_assert(periph, MA_OWNED); 3165 if (new_priority < periph->scheduled_priority) { 3166 periph->scheduled_priority = new_priority; 3167 xpt_run_allocq(periph, 0); 3168 } 3169 } 3170 3171 /* 3172 * Schedule a device to run on a given queue. 3173 * If the device was inserted as a new entry on the queue, 3174 * return 1 meaning the device queue should be run. If we 3175 * were already queued, implying someone else has already 3176 * started the queue, return 0 so the caller doesn't attempt 3177 * to run the queue. 3178 */ 3179 static int 3180 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3181 uint32_t new_priority) 3182 { 3183 int retval; 3184 uint32_t old_priority; 3185 3186 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3187 3188 old_priority = pinfo->priority; 3189 3190 /* 3191 * Are we already queued? 
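 *
 * Note that CAM priorities are "smaller is more urgent":
 * CAM_PRIORITY_NORMAL is numerically the largest of the real levels and
 * CAM_PRIORITY_NONE (~0) means nothing is scheduled, which is why both
 * branches below compare with new_priority < old_priority.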
3192 */ 3193 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3194 /* Simply reorder based on new priority */ 3195 if (new_priority < old_priority) { 3196 camq_change_priority(queue, pinfo->index, 3197 new_priority); 3198 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3199 ("changed priority to %d\n", 3200 new_priority)); 3201 retval = 1; 3202 } else 3203 retval = 0; 3204 } else { 3205 /* New entry on the queue */ 3206 if (new_priority < old_priority) 3207 pinfo->priority = new_priority; 3208 3209 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3210 ("Inserting onto queue\n")); 3211 pinfo->generation = ++queue->generation; 3212 camq_insert(queue, pinfo); 3213 retval = 1; 3214 } 3215 return (retval); 3216 } 3217 3218 static void 3219 xpt_run_allocq_task(void *context, int pending) 3220 { 3221 struct cam_periph *periph = context; 3222 3223 cam_periph_lock(periph); 3224 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3225 xpt_run_allocq(periph, 1); 3226 cam_periph_unlock(periph); 3227 cam_periph_release(periph); 3228 } 3229 3230 static void 3231 xpt_run_allocq(struct cam_periph *periph, int sleep) 3232 { 3233 struct cam_ed *device; 3234 union ccb *ccb; 3235 uint32_t prio; 3236 3237 cam_periph_assert(periph, MA_OWNED); 3238 if (periph->periph_allocating) 3239 return; 3240 cam_periph_doacquire(periph); 3241 periph->periph_allocating = 1; 3242 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3243 device = periph->path->device; 3244 ccb = NULL; 3245 restart: 3246 while ((prio = min(periph->scheduled_priority, 3247 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3248 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3249 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3250 if (ccb == NULL && 3251 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3252 if (sleep) { 3253 ccb = xpt_get_ccb(periph); 3254 goto restart; 3255 } 3256 if (periph->flags & CAM_PERIPH_RUN_TASK) 3257 break; 3258 cam_periph_doacquire(periph); 3259 periph->flags |= CAM_PERIPH_RUN_TASK; 3260 taskqueue_enqueue(xsoftc.xpt_taskq, 3261 &periph->periph_run_task); 3262 break; 3263 } 3264 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3265 if (prio == periph->immediate_priority) { 3266 periph->immediate_priority = CAM_PRIORITY_NONE; 3267 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3268 ("waking cam_periph_getccb()\n")); 3269 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3270 periph_links.sle); 3271 wakeup(&periph->ccb_list); 3272 } else { 3273 periph->scheduled_priority = CAM_PRIORITY_NONE; 3274 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3275 ("calling periph_start()\n")); 3276 periph->periph_start(periph, ccb); 3277 } 3278 ccb = NULL; 3279 } 3280 if (ccb != NULL) 3281 xpt_release_ccb(ccb); 3282 periph->periph_allocating = 0; 3283 cam_periph_release_locked(periph); 3284 } 3285 3286 static void 3287 xpt_run_devq(struct cam_devq *devq) 3288 { 3289 struct mtx *mtx; 3290 3291 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3292 3293 devq->send_queue.qfrozen_cnt++; 3294 while ((devq->send_queue.entries > 0) 3295 && (devq->send_openings > 0) 3296 && (devq->send_queue.qfrozen_cnt <= 1)) { 3297 struct cam_ed *device; 3298 union ccb *work_ccb; 3299 struct cam_sim *sim; 3300 struct xpt_proto *proto; 3301 3302 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3303 CAMQ_HEAD); 3304 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3305 ("running device %p\n", device)); 3306 3307 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3308 if (work_ccb == NULL) { 3309 printf("device on run queue with no ccbs???\n"); 3310 continue; 3311 } 3312 3313 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 
0) { 3314 mtx_lock(&xsoftc.xpt_highpower_lock); 3315 if (xsoftc.num_highpower <= 0) { 3316 /* 3317 * We got a high power command, but we 3318 * don't have any available slots. Freeze 3319 * the device queue until we have a slot 3320 * available. 3321 */ 3322 xpt_freeze_devq_device(device, 1); 3323 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3324 highpowerq_entry); 3325 3326 mtx_unlock(&xsoftc.xpt_highpower_lock); 3327 continue; 3328 } else { 3329 /* 3330 * Consume a high power slot while 3331 * this ccb runs. 3332 */ 3333 xsoftc.num_highpower--; 3334 } 3335 mtx_unlock(&xsoftc.xpt_highpower_lock); 3336 } 3337 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3338 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3339 devq->send_openings--; 3340 devq->send_active++; 3341 xpt_schedule_devq(devq, device); 3342 mtx_unlock(&devq->send_mtx); 3343 3344 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3345 /* 3346 * The client wants to freeze the queue 3347 * after this CCB is sent. 3348 */ 3349 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3350 } 3351 3352 /* In Target mode, the peripheral driver knows best... */ 3353 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3354 if ((device->inq_flags & SID_CmdQue) != 0 3355 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3356 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3357 else 3358 /* 3359 * Clear this in case of a retried CCB that 3360 * failed due to a rejected tag. 3361 */ 3362 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3363 } 3364 3365 KASSERT(device == work_ccb->ccb_h.path->device, 3366 ("device (%p) / path->device (%p) mismatch", 3367 device, work_ccb->ccb_h.path->device)); 3368 proto = xpt_proto_find(device->protocol); 3369 if (proto && proto->ops->debug_out) 3370 proto->ops->debug_out(work_ccb); 3371 3372 /* 3373 * Device queues can be shared among multiple SIM instances 3374 * that reside on different buses. Use the SIM from the 3375 * queued device, rather than the one from the calling bus. 3376 */ 3377 sim = device->sim; 3378 mtx = sim->mtx; 3379 if (mtx && !mtx_owned(mtx)) 3380 mtx_lock(mtx); 3381 else 3382 mtx = NULL; 3383 work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); 3384 (*(sim->sim_action))(sim, work_ccb); 3385 if (mtx) 3386 mtx_unlock(mtx); 3387 mtx_lock(&devq->send_mtx); 3388 } 3389 devq->send_queue.qfrozen_cnt--; 3390 } 3391 3392 /* 3393 * This function merges stuff from the src ccb into the dst ccb, while keeping 3394 * important fields in the dst ccb constant. 3395 */ 3396 void 3397 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb) 3398 { 3399 3400 /* 3401 * Pull fields that are valid for peripheral drivers to set 3402 * into the dst CCB along with the CCB "payload". 
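 *
 * Everything in the destination header other than retry_count,
 * func_code, timeout and flags survives, notably the path, priority and
 * queue bookkeeping, while the payload that follows the header is taken
 * wholesale from the source.  A rough sketch of the typical use, a
 * driver re-basing a fully built CCB onto its own path (user_ccb is a
 * hypothetical, externally supplied CCB), much as the pass(4) driver
 * does with CCBs handed in from userland:
 *
 *   ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *   xpt_merge_ccb(ccb, user_ccb);
 *   // ccb keeps its own path and priority but now carries the
 *   // caller's request.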
3403 */ 3404 dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count; 3405 dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code; 3406 dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout; 3407 dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags; 3408 bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1], 3409 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3410 } 3411 3412 void 3413 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3414 uint32_t priority, uint32_t flags) 3415 { 3416 3417 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3418 ccb_h->pinfo.priority = priority; 3419 ccb_h->path = path; 3420 ccb_h->path_id = path->bus->path_id; 3421 if (path->target) 3422 ccb_h->target_id = path->target->target_id; 3423 else 3424 ccb_h->target_id = CAM_TARGET_WILDCARD; 3425 if (path->device) { 3426 ccb_h->target_lun = path->device->lun_id; 3427 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3428 } else { 3429 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3430 } 3431 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3432 ccb_h->flags = flags; 3433 ccb_h->xflags = 0; 3434 } 3435 3436 void 3437 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority) 3438 { 3439 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3440 } 3441 3442 /* Path manipulation functions */ 3443 cam_status 3444 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3445 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3446 { 3447 struct cam_path *path; 3448 cam_status status; 3449 3450 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3451 3452 if (path == NULL) { 3453 status = CAM_RESRC_UNAVAIL; 3454 return(status); 3455 } 3456 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3457 if (status != CAM_REQ_CMP) { 3458 free(path, M_CAMPATH); 3459 path = NULL; 3460 } 3461 *new_path_ptr = path; 3462 return (status); 3463 } 3464 3465 cam_status 3466 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3467 struct cam_periph *periph, path_id_t path_id, 3468 target_id_t target_id, lun_id_t lun_id) 3469 { 3470 3471 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3472 lun_id)); 3473 } 3474 3475 cam_status 3476 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3477 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3478 { 3479 struct cam_eb *bus; 3480 struct cam_et *target; 3481 struct cam_ed *device; 3482 cam_status status; 3483 3484 status = CAM_REQ_CMP; /* Completed without error */ 3485 target = NULL; /* Wildcarded */ 3486 device = NULL; /* Wildcarded */ 3487 3488 /* 3489 * We will potentially modify the EDT, so block interrupts 3490 * that may attempt to create cam paths. 
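 *
 * (Nowadays the EDT is protected by the topology lock and the per-bus
 * eb_mtx taken below rather than by blocking interrupts.)  The compile
 * variant fills in caller-provided storage, typically on the stack, and
 * is paired with xpt_release_path(); xpt_create_path() above allocates
 * the path and is paired with xpt_free_path().  A minimal sketch, as
 * xptsetasyncfunc() earlier in this file does:
 *
 *   struct cam_path path;
 *
 *   if (xpt_compile_path(&path, NULL, path_id, target_id, lun_id) ==
 *       CAM_REQ_CMP) {
 *           ... use the path ...
 *           xpt_release_path(&path);
 *   }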
3491 */ 3492 bus = xpt_find_bus(path_id); 3493 if (bus == NULL) { 3494 status = CAM_PATH_INVALID; 3495 } else { 3496 xpt_lock_buses(); 3497 mtx_lock(&bus->eb_mtx); 3498 target = xpt_find_target(bus, target_id); 3499 if (target == NULL) { 3500 /* Create one */ 3501 struct cam_et *new_target; 3502 3503 new_target = xpt_alloc_target(bus, target_id); 3504 if (new_target == NULL) { 3505 status = CAM_RESRC_UNAVAIL; 3506 } else { 3507 target = new_target; 3508 } 3509 } 3510 xpt_unlock_buses(); 3511 if (target != NULL) { 3512 device = xpt_find_device(target, lun_id); 3513 if (device == NULL) { 3514 /* Create one */ 3515 struct cam_ed *new_device; 3516 3517 new_device = 3518 (*(bus->xport->ops->alloc_device))(bus, 3519 target, 3520 lun_id); 3521 if (new_device == NULL) { 3522 status = CAM_RESRC_UNAVAIL; 3523 } else { 3524 device = new_device; 3525 } 3526 } 3527 } 3528 mtx_unlock(&bus->eb_mtx); 3529 } 3530 3531 /* 3532 * Only touch the user's data if we are successful. 3533 */ 3534 if (status == CAM_REQ_CMP) { 3535 new_path->periph = perph; 3536 new_path->bus = bus; 3537 new_path->target = target; 3538 new_path->device = device; 3539 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3540 } else { 3541 if (device != NULL) 3542 xpt_release_device(device); 3543 if (target != NULL) 3544 xpt_release_target(target); 3545 if (bus != NULL) 3546 xpt_release_bus(bus); 3547 } 3548 return (status); 3549 } 3550 3551 int 3552 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3553 { 3554 struct cam_path *new_path; 3555 3556 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3557 if (new_path == NULL) 3558 return (ENOMEM); 3559 *new_path = *path; 3560 if (path->bus != NULL) 3561 xpt_acquire_bus(path->bus); 3562 if (path->target != NULL) 3563 xpt_acquire_target(path->target); 3564 if (path->device != NULL) 3565 xpt_acquire_device(path->device); 3566 *new_path_ptr = new_path; 3567 return (0); 3568 } 3569 3570 void 3571 xpt_release_path(struct cam_path *path) 3572 { 3573 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3574 if (path->device != NULL) { 3575 xpt_release_device(path->device); 3576 path->device = NULL; 3577 } 3578 if (path->target != NULL) { 3579 xpt_release_target(path->target); 3580 path->target = NULL; 3581 } 3582 if (path->bus != NULL) { 3583 xpt_release_bus(path->bus); 3584 path->bus = NULL; 3585 } 3586 } 3587 3588 void 3589 xpt_free_path(struct cam_path *path) 3590 { 3591 3592 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3593 xpt_release_path(path); 3594 free(path, M_CAMPATH); 3595 } 3596 3597 void 3598 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3599 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3600 { 3601 3602 xpt_lock_buses(); 3603 if (bus_ref) { 3604 if (path->bus) 3605 *bus_ref = path->bus->refcount; 3606 else 3607 *bus_ref = 0; 3608 } 3609 if (periph_ref) { 3610 if (path->periph) 3611 *periph_ref = path->periph->refcount; 3612 else 3613 *periph_ref = 0; 3614 } 3615 xpt_unlock_buses(); 3616 if (target_ref) { 3617 if (path->target) 3618 *target_ref = path->target->refcount; 3619 else 3620 *target_ref = 0; 3621 } 3622 if (device_ref) { 3623 if (path->device) 3624 *device_ref = path->device->refcount; 3625 else 3626 *device_ref = 0; 3627 } 3628 } 3629 3630 /* 3631 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3632 * in path1, 2 for match with wildcards in path2. 
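 *
 * For example, comparing a fully wildcarded async path against a
 * concrete device path returns 1 (wildcards on the first argument);
 * swapping the arguments returns 2; two unrelated concrete paths return
 * -1.  Callers that only care whether one path "covers" the other
 * typically just test xpt_path_comp(p1, p2) >= 0.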
3633 */ 3634 int 3635 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3636 { 3637 int retval = 0; 3638 3639 if (path1->bus != path2->bus) { 3640 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3641 retval = 1; 3642 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3643 retval = 2; 3644 else 3645 return (-1); 3646 } 3647 if (path1->target != path2->target) { 3648 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3649 if (retval == 0) 3650 retval = 1; 3651 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3652 retval = 2; 3653 else 3654 return (-1); 3655 } 3656 if (path1->device != path2->device) { 3657 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3658 if (retval == 0) 3659 retval = 1; 3660 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3661 retval = 2; 3662 else 3663 return (-1); 3664 } 3665 return (retval); 3666 } 3667 3668 int 3669 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3670 { 3671 int retval = 0; 3672 3673 if (path->bus != dev->target->bus) { 3674 if (path->bus->path_id == CAM_BUS_WILDCARD) 3675 retval = 1; 3676 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3677 retval = 2; 3678 else 3679 return (-1); 3680 } 3681 if (path->target != dev->target) { 3682 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3683 if (retval == 0) 3684 retval = 1; 3685 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3686 retval = 2; 3687 else 3688 return (-1); 3689 } 3690 if (path->device != dev) { 3691 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3692 if (retval == 0) 3693 retval = 1; 3694 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3695 retval = 2; 3696 else 3697 return (-1); 3698 } 3699 return (retval); 3700 } 3701 3702 void 3703 xpt_print_path(struct cam_path *path) 3704 { 3705 struct sbuf sb; 3706 char buffer[XPT_PRINT_LEN]; 3707 3708 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3709 xpt_path_sbuf(path, &sb); 3710 sbuf_finish(&sb); 3711 printf("%s", sbuf_data(&sb)); 3712 sbuf_delete(&sb); 3713 } 3714 3715 void 3716 xpt_print_device(struct cam_ed *device) 3717 { 3718 3719 if (device == NULL) 3720 printf("(nopath): "); 3721 else { 3722 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3723 device->sim->unit_number, 3724 device->sim->bus_id, 3725 device->target->target_id, 3726 (uintmax_t)device->lun_id); 3727 } 3728 } 3729 3730 void 3731 xpt_print(struct cam_path *path, const char *fmt, ...) 
3732 { 3733 va_list ap; 3734 struct sbuf sb; 3735 char buffer[XPT_PRINT_LEN]; 3736 3737 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3738 3739 xpt_path_sbuf(path, &sb); 3740 va_start(ap, fmt); 3741 sbuf_vprintf(&sb, fmt, ap); 3742 va_end(ap); 3743 3744 sbuf_finish(&sb); 3745 printf("%s", sbuf_data(&sb)); 3746 sbuf_delete(&sb); 3747 } 3748 3749 int 3750 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3751 { 3752 struct sbuf sb; 3753 int len; 3754 3755 sbuf_new(&sb, str, str_len, 0); 3756 len = xpt_path_sbuf(path, &sb); 3757 sbuf_finish(&sb); 3758 return (len); 3759 } 3760 3761 int 3762 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) 3763 { 3764 3765 if (path == NULL) 3766 sbuf_printf(sb, "(nopath): "); 3767 else { 3768 if (path->periph != NULL) 3769 sbuf_printf(sb, "(%s%d:", path->periph->periph_name, 3770 path->periph->unit_number); 3771 else 3772 sbuf_printf(sb, "(noperiph:"); 3773 3774 if (path->bus != NULL) 3775 sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, 3776 path->bus->sim->unit_number, 3777 path->bus->sim->bus_id); 3778 else 3779 sbuf_printf(sb, "nobus:"); 3780 3781 if (path->target != NULL) 3782 sbuf_printf(sb, "%d:", path->target->target_id); 3783 else 3784 sbuf_printf(sb, "X:"); 3785 3786 if (path->device != NULL) 3787 sbuf_printf(sb, "%jx): ", 3788 (uintmax_t)path->device->lun_id); 3789 else 3790 sbuf_printf(sb, "X): "); 3791 } 3792 3793 return(sbuf_len(sb)); 3794 } 3795 3796 path_id_t 3797 xpt_path_path_id(struct cam_path *path) 3798 { 3799 return(path->bus->path_id); 3800 } 3801 3802 target_id_t 3803 xpt_path_target_id(struct cam_path *path) 3804 { 3805 if (path->target != NULL) 3806 return (path->target->target_id); 3807 else 3808 return (CAM_TARGET_WILDCARD); 3809 } 3810 3811 lun_id_t 3812 xpt_path_lun_id(struct cam_path *path) 3813 { 3814 if (path->device != NULL) 3815 return (path->device->lun_id); 3816 else 3817 return (CAM_LUN_WILDCARD); 3818 } 3819 3820 struct cam_sim * 3821 xpt_path_sim(struct cam_path *path) 3822 { 3823 3824 return (path->bus->sim); 3825 } 3826 3827 struct cam_periph* 3828 xpt_path_periph(struct cam_path *path) 3829 { 3830 3831 return (path->periph); 3832 } 3833 3834 /* 3835 * Release a CAM control block for the caller. Remit the cost of the structure 3836 * to the device referenced by the path. If the this device had no 'credits' 3837 * and peripheral drivers have registered async callbacks for this notification 3838 * call them now. 3839 */ 3840 void 3841 xpt_release_ccb(union ccb *free_ccb) 3842 { 3843 struct cam_ed *device; 3844 struct cam_periph *periph; 3845 3846 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3847 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3848 device = free_ccb->ccb_h.path->device; 3849 periph = free_ccb->ccb_h.path->periph; 3850 3851 xpt_free_ccb(free_ccb); 3852 periph->periph_allocated--; 3853 cam_ccbq_release_opening(&device->ccbq); 3854 xpt_run_allocq(periph, 0); 3855 } 3856 3857 /* Functions accessed by SIM drivers */ 3858 3859 static struct xpt_xport_ops xport_default_ops = { 3860 .alloc_device = xpt_alloc_device_default, 3861 .action = xpt_action_default, 3862 .async = xpt_dev_async_default, 3863 }; 3864 static struct xpt_xport xport_default = { 3865 .xport = XPORT_UNKNOWN, 3866 .name = "unknown", 3867 .ops = &xport_default_ops, 3868 }; 3869 3870 CAM_XPT_XPORT(xport_default); 3871 3872 /* 3873 * A sim structure, listing the SIM entry points and instance 3874 * identification info is passed to xpt_bus_register to hook the SIM 3875 * into the CAM framework. 
xpt_bus_register creates a cam_eb entry 3876 * for this new bus and places it in the array of buses and assigns 3877 * it a path_id. The path_id may be influenced by "hard wiring" 3878 * information specified by the user. Once interrupt services are 3879 * available, the bus will be probed. 3880 */ 3881 int 3882 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus) 3883 { 3884 struct cam_eb *new_bus; 3885 struct cam_eb *old_bus; 3886 struct ccb_pathinq cpi; 3887 struct cam_path *path; 3888 cam_status status; 3889 3890 sim->bus_id = bus; 3891 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3892 M_CAMXPT, M_NOWAIT|M_ZERO); 3893 if (new_bus == NULL) { 3894 /* Couldn't satisfy request */ 3895 return (ENOMEM); 3896 } 3897 3898 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3899 TAILQ_INIT(&new_bus->et_entries); 3900 cam_sim_hold(sim); 3901 new_bus->sim = sim; 3902 timevalclear(&new_bus->last_reset); 3903 new_bus->flags = 0; 3904 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3905 new_bus->generation = 0; 3906 new_bus->parent_dev = parent; 3907 3908 xpt_lock_buses(); 3909 sim->path_id = new_bus->path_id = 3910 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3911 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3912 while (old_bus != NULL 3913 && old_bus->path_id < new_bus->path_id) 3914 old_bus = TAILQ_NEXT(old_bus, links); 3915 if (old_bus != NULL) 3916 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3917 else 3918 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3919 xsoftc.bus_generation++; 3920 xpt_unlock_buses(); 3921 3922 /* 3923 * Set a default transport so that a PATH_INQ can be issued to 3924 * the SIM. This will then allow for probing and attaching of 3925 * a more appropriate transport. 3926 */ 3927 new_bus->xport = &xport_default; 3928 3929 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3930 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3931 if (status != CAM_REQ_CMP) { 3932 xpt_release_bus(new_bus); 3933 return (ENOMEM); 3934 } 3935 3936 xpt_path_inq(&cpi, path); 3937 3938 /* 3939 * Use the results of PATH_INQ to pick a transport. Note that 3940 * the xpt bus (which uses XPORT_UNSPECIFIED) always uses 3941 * xport_default instead of a transport from 3942 * cam_xpt_port_set. 3943 */ 3944 if (cam_ccb_success((union ccb *)&cpi) && 3945 cpi.transport != XPORT_UNSPECIFIED) { 3946 struct xpt_xport **xpt; 3947 3948 SET_FOREACH(xpt, cam_xpt_xport_set) { 3949 if ((*xpt)->xport == cpi.transport) { 3950 new_bus->xport = *xpt; 3951 break; 3952 } 3953 } 3954 if (new_bus->xport == &xport_default) { 3955 xpt_print(path, 3956 "No transport found for %d\n", cpi.transport); 3957 xpt_release_bus(new_bus); 3958 xpt_free_path(path); 3959 return (EINVAL); 3960 } 3961 } 3962 3963 /* Notify interested parties */ 3964 if (sim->path_id != CAM_XPT_PATH_ID) { 3965 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3966 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3967 union ccb *scan_ccb; 3968 3969 /* Initiate bus rescan. 
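 *
 * Note that on the success branch below the wildcard path created above
 * is handed to the scan CCB rather than freed here: xpt_rescan() takes
 * ownership, and the rescan completion handling is expected to free
 * both the CCB and its path.  Only the failure branches free the path
 * directly.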
*/ 3970 scan_ccb = xpt_alloc_ccb_nowait(); 3971 if (scan_ccb != NULL) { 3972 scan_ccb->ccb_h.path = path; 3973 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3974 scan_ccb->crcn.flags = 0; 3975 xpt_rescan(scan_ccb); 3976 } else { 3977 xpt_print(path, 3978 "Can't allocate CCB to scan bus\n"); 3979 xpt_free_path(path); 3980 } 3981 } else 3982 xpt_free_path(path); 3983 } else 3984 xpt_free_path(path); 3985 return (CAM_SUCCESS); 3986 } 3987 3988 int 3989 xpt_bus_deregister(path_id_t pathid) 3990 { 3991 struct cam_path bus_path; 3992 cam_status status; 3993 3994 status = xpt_compile_path(&bus_path, NULL, pathid, 3995 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3996 if (status != CAM_REQ_CMP) 3997 return (ENOMEM); 3998 3999 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 4000 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 4001 4002 /* Release the reference count held while registered. */ 4003 xpt_release_bus(bus_path.bus); 4004 xpt_release_path(&bus_path); 4005 4006 return (CAM_SUCCESS); 4007 } 4008 4009 static path_id_t 4010 xptnextfreepathid(void) 4011 { 4012 struct cam_eb *bus; 4013 path_id_t pathid; 4014 const char *strval; 4015 4016 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4017 pathid = 0; 4018 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4019 retry: 4020 /* Find an unoccupied pathid */ 4021 while (bus != NULL && bus->path_id <= pathid) { 4022 if (bus->path_id == pathid) 4023 pathid++; 4024 bus = TAILQ_NEXT(bus, links); 4025 } 4026 4027 /* 4028 * Ensure that this pathid is not reserved for 4029 * a bus that may be registered in the future. 4030 */ 4031 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4032 ++pathid; 4033 /* Start the search over */ 4034 goto retry; 4035 } 4036 return (pathid); 4037 } 4038 4039 static path_id_t 4040 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4041 { 4042 path_id_t pathid; 4043 int i, dunit, val; 4044 char buf[32]; 4045 const char *dname; 4046 4047 pathid = CAM_XPT_PATH_ID; 4048 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4049 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4050 return (pathid); 4051 i = 0; 4052 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4053 if (strcmp(dname, "scbus")) { 4054 /* Avoid a bit of foot shooting. */ 4055 continue; 4056 } 4057 if (dunit < 0) /* unwired?! */ 4058 continue; 4059 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4060 if (sim_bus == val) { 4061 pathid = dunit; 4062 break; 4063 } 4064 } else if (sim_bus == 0) { 4065 /* Unspecified matches bus 0 */ 4066 pathid = dunit; 4067 break; 4068 } else { 4069 printf("Ambiguous scbus configuration for %s%d " 4070 "bus %d, cannot wire down. 
The kernel " 4071 "config entry for scbus%d should " 4072 "specify a controller bus.\n" 4073 "Scbus will be assigned dynamically.\n", 4074 sim_name, sim_unit, sim_bus, dunit); 4075 break; 4076 } 4077 } 4078 4079 if (pathid == CAM_XPT_PATH_ID) 4080 pathid = xptnextfreepathid(); 4081 return (pathid); 4082 } 4083 4084 static const char * 4085 xpt_async_string(uint32_t async_code) 4086 { 4087 4088 switch (async_code) { 4089 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4090 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4091 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4092 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4093 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4094 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4095 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4096 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4097 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4098 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4099 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4100 case AC_CONTRACT: return ("AC_CONTRACT"); 4101 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4102 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4103 } 4104 return ("AC_UNKNOWN"); 4105 } 4106 4107 static int 4108 xpt_async_size(uint32_t async_code) 4109 { 4110 4111 switch (async_code) { 4112 case AC_BUS_RESET: return (0); 4113 case AC_UNSOL_RESEL: return (0); 4114 case AC_SCSI_AEN: return (0); 4115 case AC_SENT_BDR: return (0); 4116 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4117 case AC_PATH_DEREGISTERED: return (0); 4118 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4119 case AC_LOST_DEVICE: return (0); 4120 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4121 case AC_INQ_CHANGED: return (0); 4122 case AC_GETDEV_CHANGED: return (0); 4123 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4124 case AC_ADVINFO_CHANGED: return (-1); 4125 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4126 } 4127 return (0); 4128 } 4129 4130 static int 4131 xpt_async_process_dev(struct cam_ed *device, void *arg) 4132 { 4133 union ccb *ccb = arg; 4134 struct cam_path *path = ccb->ccb_h.path; 4135 void *async_arg = ccb->casync.async_arg_ptr; 4136 uint32_t async_code = ccb->casync.async_code; 4137 bool relock; 4138 4139 if (path->device != device 4140 && path->device->lun_id != CAM_LUN_WILDCARD 4141 && device->lun_id != CAM_LUN_WILDCARD) 4142 return (1); 4143 4144 /* 4145 * The async callback could free the device. 4146 * If it is a broadcast async, it doesn't hold 4147 * device reference, so take our own reference. 4148 */ 4149 xpt_acquire_device(device); 4150 4151 /* 4152 * If async for specific device is to be delivered to 4153 * the wildcard client, take the specific device lock. 4154 * XXX: We may need a way for client to specify it. 
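 * (The traversal enters with the client device's lock held; when that
 * client is a wildcard and the path is specific, the code below drops it
 * and takes the specific path's lock around the callbacks, reacquiring it
 * afterwards.)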
4155 */ 4156 if ((device->lun_id == CAM_LUN_WILDCARD && 4157 path->device->lun_id != CAM_LUN_WILDCARD) || 4158 (device->target->target_id == CAM_TARGET_WILDCARD && 4159 path->target->target_id != CAM_TARGET_WILDCARD) || 4160 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4161 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4162 mtx_unlock(&device->device_mtx); 4163 xpt_path_lock(path); 4164 relock = true; 4165 } else 4166 relock = false; 4167 4168 (*(device->target->bus->xport->ops->async))(async_code, 4169 device->target->bus, device->target, device, async_arg); 4170 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4171 4172 if (relock) { 4173 xpt_path_unlock(path); 4174 mtx_lock(&device->device_mtx); 4175 } 4176 xpt_release_device(device); 4177 return (1); 4178 } 4179 4180 static int 4181 xpt_async_process_tgt(struct cam_et *target, void *arg) 4182 { 4183 union ccb *ccb = arg; 4184 struct cam_path *path = ccb->ccb_h.path; 4185 4186 if (path->target != target 4187 && path->target->target_id != CAM_TARGET_WILDCARD 4188 && target->target_id != CAM_TARGET_WILDCARD) 4189 return (1); 4190 4191 if (ccb->casync.async_code == AC_SENT_BDR) { 4192 /* Update our notion of when the last reset occurred */ 4193 microtime(&target->last_reset); 4194 } 4195 4196 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4197 } 4198 4199 static void 4200 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4201 { 4202 struct cam_eb *bus; 4203 struct cam_path *path; 4204 void *async_arg; 4205 uint32_t async_code; 4206 4207 path = ccb->ccb_h.path; 4208 async_code = ccb->casync.async_code; 4209 async_arg = ccb->casync.async_arg_ptr; 4210 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4211 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4212 bus = path->bus; 4213 4214 if (async_code == AC_BUS_RESET) { 4215 /* Update our notion of when the last reset occurred */ 4216 microtime(&bus->last_reset); 4217 } 4218 4219 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4220 4221 /* 4222 * If this wasn't a fully wildcarded async, tell all 4223 * clients that want all async events. 4224 */ 4225 if (bus != xpt_periph->path->bus) { 4226 xpt_path_lock(xpt_periph->path); 4227 xpt_async_process_dev(xpt_periph->path->device, ccb); 4228 xpt_path_unlock(xpt_periph->path); 4229 } 4230 4231 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4232 xpt_release_devq(path, 1, TRUE); 4233 else 4234 xpt_release_simq(path->bus->sim, TRUE); 4235 if (ccb->casync.async_arg_size > 0) 4236 free(async_arg, M_CAMXPT); 4237 xpt_free_path(path); 4238 xpt_free_ccb(ccb); 4239 } 4240 4241 static void 4242 xpt_async_bcast(struct async_list *async_head, 4243 uint32_t async_code, 4244 struct cam_path *path, void *async_arg) 4245 { 4246 struct async_node *cur_entry; 4247 struct mtx *mtx; 4248 4249 cur_entry = SLIST_FIRST(async_head); 4250 while (cur_entry != NULL) { 4251 struct async_node *next_entry; 4252 /* 4253 * Grab the next list entry before we call the current 4254 * entry's callback. This is because the callback function 4255 * can delete its async callback entry. 4256 */ 4257 next_entry = SLIST_NEXT(cur_entry, links); 4258 if ((cur_entry->event_enable & async_code) != 0) { 4259 mtx = cur_entry->event_lock ? 
4260 path->device->sim->mtx : NULL;
4261 if (mtx)
4262 mtx_lock(mtx);
4263 cur_entry->callback(cur_entry->callback_arg,
4264 async_code, path,
4265 async_arg);
4266 if (mtx)
4267 mtx_unlock(mtx);
4268 }
4269 cur_entry = next_entry;
4270 }
4271 }
4272
4273 void
4274 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
4275 {
4276 union ccb *ccb;
4277 int size;
4278
4279 ccb = xpt_alloc_ccb_nowait();
4280 if (ccb == NULL) {
4281 xpt_print(path, "Can't allocate CCB to send %s\n",
4282 xpt_async_string(async_code));
4283 return;
4284 }
4285
4286 if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
4287 xpt_print(path, "Can't allocate path to send %s\n",
4288 xpt_async_string(async_code));
4289 xpt_free_ccb(ccb);
4290 return;
4291 }
4292 ccb->ccb_h.path->periph = NULL;
4293 ccb->ccb_h.func_code = XPT_ASYNC;
4294 ccb->ccb_h.cbfcnp = xpt_async_process;
4295 ccb->ccb_h.flags |= CAM_UNLOCKED;
4296 ccb->casync.async_code = async_code;
4297 ccb->casync.async_arg_size = 0;
4298 size = xpt_async_size(async_code);
4299 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4300 ("xpt_async: func %#x %s async_code %d %s\n",
4301 ccb->ccb_h.func_code,
4302 xpt_action_name(ccb->ccb_h.func_code),
4303 async_code,
4304 xpt_async_string(async_code)));
4305 if (size > 0 && async_arg != NULL) {
4306 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4307 if (ccb->casync.async_arg_ptr == NULL) {
4308 xpt_print(path, "Can't allocate argument to send %s\n",
4309 xpt_async_string(async_code));
4310 xpt_free_path(ccb->ccb_h.path);
4311 xpt_free_ccb(ccb);
4312 return;
4313 }
4314 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4315 ccb->casync.async_arg_size = size;
4316 } else if (size < 0) {
4317 ccb->casync.async_arg_ptr = async_arg;
4318 ccb->casync.async_arg_size = size;
4319 }
4320 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4321 xpt_freeze_devq(path, 1);
4322 else
4323 xpt_freeze_simq(path->bus->sim, 1);
4324 xpt_action(ccb);
4325 }
4326
4327 static void
4328 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4329 struct cam_et *target, struct cam_ed *device,
4330 void *async_arg)
4331 {
4332
4333 /*
4334 * We only need to handle events for real devices.
4335 */
4336 if (target->target_id == CAM_TARGET_WILDCARD
4337 || device->lun_id == CAM_LUN_WILDCARD)
4338 return;
4339
4340 printf("%s called\n", __func__);
4341 }
4342
4343 static uint32_t
4344 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4345 {
4346 struct cam_devq *devq;
4347 uint32_t freeze;
4348
4349 devq = dev->sim->devq;
4350 mtx_assert(&devq->send_mtx, MA_OWNED);
4351 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4352 ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4353 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4354 freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4355 /* Remove frozen device from sendq.
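 * A frozen device must not be selected by xpt_run_devq(), so take it off
 * the send queue here; xpt_release_devq_device() reschedules it once the
 * freeze count drops back to zero.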
*/ 4356 if (device_is_queued(dev)) 4357 camq_remove(&devq->send_queue, dev->devq_entry.index); 4358 return (freeze); 4359 } 4360 4361 uint32_t 4362 xpt_freeze_devq(struct cam_path *path, u_int count) 4363 { 4364 struct cam_ed *dev = path->device; 4365 struct cam_devq *devq; 4366 uint32_t freeze; 4367 4368 devq = dev->sim->devq; 4369 mtx_lock(&devq->send_mtx); 4370 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4371 freeze = xpt_freeze_devq_device(dev, count); 4372 mtx_unlock(&devq->send_mtx); 4373 return (freeze); 4374 } 4375 4376 uint32_t 4377 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4378 { 4379 struct cam_devq *devq; 4380 uint32_t freeze; 4381 4382 devq = sim->devq; 4383 mtx_lock(&devq->send_mtx); 4384 freeze = (devq->send_queue.qfrozen_cnt += count); 4385 mtx_unlock(&devq->send_mtx); 4386 return (freeze); 4387 } 4388 4389 static void 4390 xpt_release_devq_timeout(void *arg) 4391 { 4392 struct cam_ed *dev; 4393 struct cam_devq *devq; 4394 4395 dev = (struct cam_ed *)arg; 4396 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4397 devq = dev->sim->devq; 4398 mtx_assert(&devq->send_mtx, MA_OWNED); 4399 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4400 xpt_run_devq(devq); 4401 } 4402 4403 void 4404 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4405 { 4406 struct cam_ed *dev; 4407 struct cam_devq *devq; 4408 4409 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4410 count, run_queue)); 4411 dev = path->device; 4412 devq = dev->sim->devq; 4413 mtx_lock(&devq->send_mtx); 4414 if (xpt_release_devq_device(dev, count, run_queue)) 4415 xpt_run_devq(dev->sim->devq); 4416 mtx_unlock(&devq->send_mtx); 4417 } 4418 4419 static int 4420 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4421 { 4422 4423 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4424 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4425 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4426 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4427 if (count > dev->ccbq.queue.qfrozen_cnt) { 4428 #ifdef INVARIANTS 4429 printf("xpt_release_devq(): requested %u > present %u\n", 4430 count, dev->ccbq.queue.qfrozen_cnt); 4431 #endif 4432 count = dev->ccbq.queue.qfrozen_cnt; 4433 } 4434 dev->ccbq.queue.qfrozen_cnt -= count; 4435 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4436 /* 4437 * No longer need to wait for a successful 4438 * command completion. 4439 */ 4440 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4441 /* 4442 * Remove any timeouts that might be scheduled 4443 * to release this queue. 4444 */ 4445 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4446 callout_stop(&dev->callout); 4447 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4448 } 4449 /* 4450 * Now that we are unfrozen schedule the 4451 * device so any pending transactions are 4452 * run. 4453 */ 4454 xpt_schedule_devq(dev->sim->devq, dev); 4455 } else 4456 run_queue = 0; 4457 return (run_queue); 4458 } 4459 4460 void 4461 xpt_release_simq(struct cam_sim *sim, int run_queue) 4462 { 4463 struct cam_devq *devq; 4464 4465 devq = sim->devq; 4466 mtx_lock(&devq->send_mtx); 4467 if (devq->send_queue.qfrozen_cnt <= 0) { 4468 #ifdef INVARIANTS 4469 printf("xpt_release_simq: requested 1 > present %u\n", 4470 devq->send_queue.qfrozen_cnt); 4471 #endif 4472 } else 4473 devq->send_queue.qfrozen_cnt--; 4474 if (devq->send_queue.qfrozen_cnt == 0) { 4475 if (run_queue) { 4476 /* 4477 * Now that we are unfrozen run the send queue. 
4478 */ 4479 xpt_run_devq(sim->devq); 4480 } 4481 } 4482 mtx_unlock(&devq->send_mtx); 4483 } 4484 4485 void 4486 xpt_done(union ccb *done_ccb) 4487 { 4488 struct cam_doneq *queue; 4489 int run, hash; 4490 4491 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4492 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4493 done_ccb->csio.bio != NULL) 4494 biotrack(done_ccb->csio.bio, __func__); 4495 #endif 4496 4497 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4498 ("xpt_done: func= %#x %s status %#x\n", 4499 done_ccb->ccb_h.func_code, 4500 xpt_action_name(done_ccb->ccb_h.func_code), 4501 done_ccb->ccb_h.status)); 4502 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4503 return; 4504 4505 /* Store the time the ccb was in the sim */ 4506 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4507 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4508 hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4509 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4510 queue = &cam_doneqs[hash]; 4511 mtx_lock(&queue->cam_doneq_mtx); 4512 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4513 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4514 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4515 mtx_unlock(&queue->cam_doneq_mtx); 4516 if (run && !dumping) 4517 wakeup(&queue->cam_doneq); 4518 } 4519 4520 void 4521 xpt_done_direct(union ccb *done_ccb) 4522 { 4523 4524 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4525 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4526 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4527 return; 4528 4529 /* Store the time the ccb was in the sim */ 4530 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4531 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4532 xpt_done_process(&done_ccb->ccb_h); 4533 } 4534 4535 union ccb * 4536 xpt_alloc_ccb(void) 4537 { 4538 union ccb *new_ccb; 4539 4540 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4541 return (new_ccb); 4542 } 4543 4544 union ccb * 4545 xpt_alloc_ccb_nowait(void) 4546 { 4547 union ccb *new_ccb; 4548 4549 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4550 return (new_ccb); 4551 } 4552 4553 void 4554 xpt_free_ccb(union ccb *free_ccb) 4555 { 4556 struct cam_periph *periph; 4557 4558 if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) { 4559 /* 4560 * Looks like a CCB allocated from a periph UMA zone. 4561 */ 4562 periph = free_ccb->ccb_h.path->periph; 4563 uma_zfree(periph->ccb_zone, free_ccb); 4564 } else { 4565 free(free_ccb, M_CAMCCB); 4566 } 4567 } 4568 4569 /* Private XPT functions */ 4570 4571 /* 4572 * Get a CAM control block for the caller. Charge the structure to the device 4573 * referenced by the path. If we don't have sufficient resources to allocate 4574 * more ccbs, we return NULL. 
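 * (xpt_get_ccb_nowait() is the non-sleeping variant; xpt_get_ccb() below
 * sleeps in the allocation instead of failing. Both charge the new CCB to
 * the device's ccbq via cam_ccbq_take_opening().)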
4575 */ 4576 static union ccb * 4577 xpt_get_ccb_nowait(struct cam_periph *periph) 4578 { 4579 union ccb *new_ccb; 4580 int alloc_flags; 4581 4582 if (periph->ccb_zone != NULL) { 4583 alloc_flags = CAM_CCB_FROM_UMA; 4584 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT); 4585 } else { 4586 alloc_flags = 0; 4587 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4588 } 4589 if (new_ccb == NULL) 4590 return (NULL); 4591 new_ccb->ccb_h.alloc_flags = alloc_flags; 4592 periph->periph_allocated++; 4593 cam_ccbq_take_opening(&periph->path->device->ccbq); 4594 return (new_ccb); 4595 } 4596 4597 static union ccb * 4598 xpt_get_ccb(struct cam_periph *periph) 4599 { 4600 union ccb *new_ccb; 4601 int alloc_flags; 4602 4603 cam_periph_unlock(periph); 4604 if (periph->ccb_zone != NULL) { 4605 alloc_flags = CAM_CCB_FROM_UMA; 4606 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK); 4607 } else { 4608 alloc_flags = 0; 4609 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4610 } 4611 new_ccb->ccb_h.alloc_flags = alloc_flags; 4612 cam_periph_lock(periph); 4613 periph->periph_allocated++; 4614 cam_ccbq_take_opening(&periph->path->device->ccbq); 4615 return (new_ccb); 4616 } 4617 4618 union ccb * 4619 cam_periph_getccb(struct cam_periph *periph, uint32_t priority) 4620 { 4621 struct ccb_hdr *ccb_h; 4622 4623 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4624 cam_periph_assert(periph, MA_OWNED); 4625 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4626 ccb_h->pinfo.priority != priority) { 4627 if (priority < periph->immediate_priority) { 4628 periph->immediate_priority = priority; 4629 xpt_run_allocq(periph, 0); 4630 } else 4631 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4632 "cgticb", 0); 4633 } 4634 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4635 return ((union ccb *)ccb_h); 4636 } 4637 4638 static void 4639 xpt_acquire_bus(struct cam_eb *bus) 4640 { 4641 4642 xpt_lock_buses(); 4643 bus->refcount++; 4644 xpt_unlock_buses(); 4645 } 4646 4647 static void 4648 xpt_release_bus(struct cam_eb *bus) 4649 { 4650 4651 xpt_lock_buses(); 4652 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4653 if (--bus->refcount > 0) { 4654 xpt_unlock_buses(); 4655 return; 4656 } 4657 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4658 xsoftc.bus_generation++; 4659 xpt_unlock_buses(); 4660 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4661 ("destroying bus, but target list is not empty")); 4662 cam_sim_release(bus->sim); 4663 mtx_destroy(&bus->eb_mtx); 4664 free(bus, M_CAMXPT); 4665 } 4666 4667 static struct cam_et * 4668 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4669 { 4670 struct cam_et *cur_target, *target; 4671 4672 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4673 mtx_assert(&bus->eb_mtx, MA_OWNED); 4674 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4675 M_NOWAIT|M_ZERO); 4676 if (target == NULL) 4677 return (NULL); 4678 4679 TAILQ_INIT(&target->ed_entries); 4680 target->bus = bus; 4681 target->target_id = target_id; 4682 target->refcount = 1; 4683 target->generation = 0; 4684 target->luns = NULL; 4685 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4686 timevalclear(&target->last_reset); 4687 /* 4688 * Hold a reference to our parent bus so it 4689 * will not go away before we do. 
4690 */ 4691 bus->refcount++; 4692 4693 /* Insertion sort into our bus's target list */ 4694 cur_target = TAILQ_FIRST(&bus->et_entries); 4695 while (cur_target != NULL && cur_target->target_id < target_id) 4696 cur_target = TAILQ_NEXT(cur_target, links); 4697 if (cur_target != NULL) { 4698 TAILQ_INSERT_BEFORE(cur_target, target, links); 4699 } else { 4700 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4701 } 4702 bus->generation++; 4703 return (target); 4704 } 4705 4706 static void 4707 xpt_acquire_target(struct cam_et *target) 4708 { 4709 struct cam_eb *bus = target->bus; 4710 4711 mtx_lock(&bus->eb_mtx); 4712 target->refcount++; 4713 mtx_unlock(&bus->eb_mtx); 4714 } 4715 4716 static void 4717 xpt_release_target(struct cam_et *target) 4718 { 4719 struct cam_eb *bus = target->bus; 4720 4721 mtx_lock(&bus->eb_mtx); 4722 if (--target->refcount > 0) { 4723 mtx_unlock(&bus->eb_mtx); 4724 return; 4725 } 4726 TAILQ_REMOVE(&bus->et_entries, target, links); 4727 bus->generation++; 4728 mtx_unlock(&bus->eb_mtx); 4729 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4730 ("destroying target, but device list is not empty")); 4731 xpt_release_bus(bus); 4732 mtx_destroy(&target->luns_mtx); 4733 if (target->luns) 4734 free(target->luns, M_CAMXPT); 4735 free(target, M_CAMXPT); 4736 } 4737 4738 static struct cam_ed * 4739 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4740 lun_id_t lun_id) 4741 { 4742 struct cam_ed *device; 4743 4744 device = xpt_alloc_device(bus, target, lun_id); 4745 if (device == NULL) 4746 return (NULL); 4747 4748 device->mintags = 1; 4749 device->maxtags = 1; 4750 return (device); 4751 } 4752 4753 static void 4754 xpt_destroy_device(void *context, int pending) 4755 { 4756 struct cam_ed *device = context; 4757 4758 mtx_lock(&device->device_mtx); 4759 mtx_destroy(&device->device_mtx); 4760 free(device, M_CAMDEV); 4761 } 4762 4763 struct cam_ed * 4764 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4765 { 4766 struct cam_ed *cur_device, *device; 4767 struct cam_devq *devq; 4768 cam_status status; 4769 4770 mtx_assert(&bus->eb_mtx, MA_OWNED); 4771 /* Make space for us in the device queue on our bus */ 4772 devq = bus->sim->devq; 4773 mtx_lock(&devq->send_mtx); 4774 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4775 mtx_unlock(&devq->send_mtx); 4776 if (status != CAM_REQ_CMP) 4777 return (NULL); 4778 4779 device = (struct cam_ed *)malloc(sizeof(*device), 4780 M_CAMDEV, M_NOWAIT|M_ZERO); 4781 if (device == NULL) 4782 return (NULL); 4783 4784 cam_init_pinfo(&device->devq_entry); 4785 device->target = target; 4786 device->lun_id = lun_id; 4787 device->sim = bus->sim; 4788 if (cam_ccbq_init(&device->ccbq, 4789 bus->sim->max_dev_openings) != 0) { 4790 free(device, M_CAMDEV); 4791 return (NULL); 4792 } 4793 SLIST_INIT(&device->asyncs); 4794 SLIST_INIT(&device->periphs); 4795 device->generation = 0; 4796 device->flags = CAM_DEV_UNCONFIGURED; 4797 device->tag_delay_count = 0; 4798 device->tag_saved_openings = 0; 4799 device->refcount = 1; 4800 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4801 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4802 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4803 /* 4804 * Hold a reference to our parent bus so it 4805 * will not go away before we do. 
4806 */ 4807 target->refcount++; 4808 4809 cur_device = TAILQ_FIRST(&target->ed_entries); 4810 while (cur_device != NULL && cur_device->lun_id < lun_id) 4811 cur_device = TAILQ_NEXT(cur_device, links); 4812 if (cur_device != NULL) 4813 TAILQ_INSERT_BEFORE(cur_device, device, links); 4814 else 4815 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4816 target->generation++; 4817 return (device); 4818 } 4819 4820 void 4821 xpt_acquire_device(struct cam_ed *device) 4822 { 4823 struct cam_eb *bus = device->target->bus; 4824 4825 mtx_lock(&bus->eb_mtx); 4826 device->refcount++; 4827 mtx_unlock(&bus->eb_mtx); 4828 } 4829 4830 void 4831 xpt_release_device(struct cam_ed *device) 4832 { 4833 struct cam_eb *bus = device->target->bus; 4834 struct cam_devq *devq; 4835 4836 mtx_lock(&bus->eb_mtx); 4837 if (--device->refcount > 0) { 4838 mtx_unlock(&bus->eb_mtx); 4839 return; 4840 } 4841 4842 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4843 device->target->generation++; 4844 mtx_unlock(&bus->eb_mtx); 4845 4846 /* Release our slot in the devq */ 4847 devq = bus->sim->devq; 4848 mtx_lock(&devq->send_mtx); 4849 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4850 4851 KASSERT(SLIST_EMPTY(&device->periphs), 4852 ("destroying device, but periphs list is not empty")); 4853 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4854 ("destroying device while still queued for ccbs")); 4855 4856 /* The send_mtx must be held when accessing the callout */ 4857 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4858 callout_stop(&device->callout); 4859 4860 mtx_unlock(&devq->send_mtx); 4861 4862 xpt_release_target(device->target); 4863 4864 cam_ccbq_fini(&device->ccbq); 4865 /* 4866 * Free allocated memory. free(9) does nothing if the 4867 * supplied pointer is NULL, so it is safe to call without 4868 * checking. 
4869 */ 4870 free(device->supported_vpds, M_CAMXPT); 4871 free(device->device_id, M_CAMXPT); 4872 free(device->ext_inq, M_CAMXPT); 4873 free(device->physpath, M_CAMXPT); 4874 free(device->rcap_buf, M_CAMXPT); 4875 free(device->serial_num, M_CAMXPT); 4876 free(device->nvme_data, M_CAMXPT); 4877 free(device->nvme_cdata, M_CAMXPT); 4878 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4879 } 4880 4881 uint32_t 4882 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4883 { 4884 int result; 4885 struct cam_ed *dev; 4886 4887 dev = path->device; 4888 mtx_lock(&dev->sim->devq->send_mtx); 4889 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4890 mtx_unlock(&dev->sim->devq->send_mtx); 4891 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4892 || (dev->inq_flags & SID_CmdQue) != 0) 4893 dev->tag_saved_openings = newopenings; 4894 return (result); 4895 } 4896 4897 static struct cam_eb * 4898 xpt_find_bus(path_id_t path_id) 4899 { 4900 struct cam_eb *bus; 4901 4902 xpt_lock_buses(); 4903 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4904 bus != NULL; 4905 bus = TAILQ_NEXT(bus, links)) { 4906 if (bus->path_id == path_id) { 4907 bus->refcount++; 4908 break; 4909 } 4910 } 4911 xpt_unlock_buses(); 4912 return (bus); 4913 } 4914 4915 static struct cam_et * 4916 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4917 { 4918 struct cam_et *target; 4919 4920 mtx_assert(&bus->eb_mtx, MA_OWNED); 4921 for (target = TAILQ_FIRST(&bus->et_entries); 4922 target != NULL; 4923 target = TAILQ_NEXT(target, links)) { 4924 if (target->target_id == target_id) { 4925 target->refcount++; 4926 break; 4927 } 4928 } 4929 return (target); 4930 } 4931 4932 static struct cam_ed * 4933 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4934 { 4935 struct cam_ed *device; 4936 4937 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4938 for (device = TAILQ_FIRST(&target->ed_entries); 4939 device != NULL; 4940 device = TAILQ_NEXT(device, links)) { 4941 if (device->lun_id == lun_id) { 4942 device->refcount++; 4943 break; 4944 } 4945 } 4946 return (device); 4947 } 4948 4949 void 4950 xpt_start_tags(struct cam_path *path) 4951 { 4952 struct ccb_relsim crs; 4953 struct cam_ed *device; 4954 struct cam_sim *sim; 4955 int newopenings; 4956 4957 device = path->device; 4958 sim = path->bus->sim; 4959 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4960 xpt_freeze_devq(path, /*count*/1); 4961 device->inq_flags |= SID_CmdQue; 4962 if (device->tag_saved_openings != 0) 4963 newopenings = device->tag_saved_openings; 4964 else 4965 newopenings = min(device->maxtags, 4966 sim->max_tagged_dev_openings); 4967 xpt_dev_ccbq_resize(path, newopenings); 4968 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4969 memset(&crs, 0, sizeof(crs)); 4970 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4971 crs.ccb_h.func_code = XPT_REL_SIMQ; 4972 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4973 crs.openings 4974 = crs.release_timeout 4975 = crs.qfrozen_cnt 4976 = 0; 4977 xpt_action((union ccb *)&crs); 4978 } 4979 4980 void 4981 xpt_stop_tags(struct cam_path *path) 4982 { 4983 struct ccb_relsim crs; 4984 struct cam_ed *device; 4985 struct cam_sim *sim; 4986 4987 device = path->device; 4988 sim = path->bus->sim; 4989 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4990 device->tag_delay_count = 0; 4991 xpt_freeze_devq(path, /*count*/1); 4992 device->inq_flags &= ~SID_CmdQue; 4993 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4994 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4995 memset(&crs, 0, sizeof(crs)); 4996 xpt_setup_ccb(&crs.ccb_h, 
path, CAM_PRIORITY_NORMAL);
4997 crs.ccb_h.func_code = XPT_REL_SIMQ;
4998 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
4999 crs.openings
5000 = crs.release_timeout
5001 = crs.qfrozen_cnt
5002 = 0;
5003 xpt_action((union ccb *)&crs);
5004 }
5005
5006 /*
5007 * Assume all possible buses are detected by this time, so allow boot
5008 * as soon as they are all scanned.
5009 */
5010 static void
5011 xpt_boot_delay(void *arg)
5012 {
5013
5014 xpt_release_boot();
5015 }
5016
5017 /*
5018 * Now that all config hooks have completed, start the boot_delay timer,
5019 * waiting for possibly still undetected buses (USB) to appear.
5020 */
5021 static void
5022 xpt_ch_done(void *arg)
5023 {
5024
5025 callout_init(&xsoftc.boot_callout, 1);
5026 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay,
5027 SBT_1MS, xpt_boot_delay, NULL, 0);
5028 }
5029 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL);
5030
5031 /*
5032 * Now that interrupts are enabled, go find our devices
5033 */
5034 static void
5035 xpt_config(void *arg)
5036 {
5037 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq"))
5038 printf("xpt_config: failed to create taskqueue thread.\n");
5039
5040 /* Setup debugging path */
5041 if (cam_dflags != CAM_DEBUG_NONE) {
5042 if (xpt_create_path(&cam_dpath, NULL,
5043 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
5044 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
5045 printf("xpt_config: xpt_create_path() failed for debug"
5046 " target %d:%d:%d, debugging disabled\n",
5047 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
5048 cam_dflags = CAM_DEBUG_NONE;
5049 }
5050 } else
5051 cam_dpath = NULL;
5052
5053 periphdriver_init(1);
5054 xpt_hold_boot();
5055
5056 /* Fire up rescan thread. */
5057 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0,
5058 "cam", "scanner")) {
5059 printf("xpt_config: failed to create rescan thread.\n");
5060 }
5061 }
5062
5063 void
5064 xpt_hold_boot_locked(void)
5065 {
5066
5067 if (xsoftc.buses_to_config++ == 0)
5068 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount);
5069 }
5070
5071 void
5072 xpt_hold_boot(void)
5073 {
5074
5075 xpt_lock_buses();
5076 xpt_hold_boot_locked();
5077 xpt_unlock_buses();
5078 }
5079
5080 void
5081 xpt_release_boot(void)
5082 {
5083
5084 xpt_lock_buses();
5085 if (--xsoftc.buses_to_config == 0) {
5086 if (xsoftc.buses_config_done == 0) {
5087 xsoftc.buses_config_done = 1;
5088 xsoftc.buses_to_config++;
5089 TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task,
5090 NULL);
5091 taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task);
5092 } else
5093 root_mount_rel(&xsoftc.xpt_rootmount);
5094 }
5095 xpt_unlock_buses();
5096 }
5097
5098 /*
5099 * If the given device only has one peripheral attached to it, and if that
5100 * peripheral is the passthrough driver, announce it. This ensures that the
5101 * user sees some sort of announcement for every peripheral in their system.
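 * (Run via xpt_for_all_devices() from xpt_finishconfig_task() below when
 * the system is not booting verbose.)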
5102 */ 5103 static int 5104 xptpassannouncefunc(struct cam_ed *device, void *arg) 5105 { 5106 struct cam_periph *periph; 5107 int i; 5108 5109 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5110 periph = SLIST_NEXT(periph, periph_links), i++); 5111 5112 periph = SLIST_FIRST(&device->periphs); 5113 if ((i == 1) 5114 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5115 xpt_announce_periph(periph, NULL); 5116 5117 return(1); 5118 } 5119 5120 static void 5121 xpt_finishconfig_task(void *context, int pending) 5122 { 5123 5124 periphdriver_init(2); 5125 /* 5126 * Check for devices with no "standard" peripheral driver 5127 * attached. For any devices like that, announce the 5128 * passthrough driver so the user will see something. 5129 */ 5130 if (!bootverbose) 5131 xpt_for_all_devices(xptpassannouncefunc, NULL); 5132 5133 xpt_release_boot(); 5134 } 5135 5136 cam_status 5137 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5138 struct cam_path *path) 5139 { 5140 struct ccb_setasync csa; 5141 cam_status status; 5142 bool xptpath = false; 5143 5144 if (path == NULL) { 5145 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5146 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5147 if (status != CAM_REQ_CMP) 5148 return (status); 5149 xpt_path_lock(path); 5150 xptpath = true; 5151 } 5152 5153 memset(&csa, 0, sizeof(csa)); 5154 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5155 csa.ccb_h.func_code = XPT_SASYNC_CB; 5156 csa.event_enable = event; 5157 csa.callback = cbfunc; 5158 csa.callback_arg = cbarg; 5159 xpt_action((union ccb *)&csa); 5160 status = csa.ccb_h.status; 5161 5162 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5163 ("xpt_register_async: func %p\n", cbfunc)); 5164 5165 if (xptpath) { 5166 xpt_path_unlock(path); 5167 xpt_free_path(path); 5168 } 5169 5170 if ((status == CAM_REQ_CMP) && 5171 (csa.event_enable & AC_FOUND_DEVICE)) { 5172 /* 5173 * Get this peripheral up to date with all 5174 * the currently existing devices. 5175 */ 5176 xpt_for_all_devices(xptsetasyncfunc, &csa); 5177 } 5178 if ((status == CAM_REQ_CMP) && 5179 (csa.event_enable & AC_PATH_REGISTERED)) { 5180 /* 5181 * Get this peripheral up to date with all 5182 * the currently existing buses. 5183 */ 5184 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5185 } 5186 5187 return (status); 5188 } 5189 5190 static void 5191 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5192 { 5193 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5194 5195 switch (work_ccb->ccb_h.func_code) { 5196 /* Common cases first */ 5197 case XPT_PATH_INQ: /* Path routing inquiry */ 5198 { 5199 struct ccb_pathinq *cpi; 5200 5201 cpi = &work_ccb->cpi; 5202 cpi->version_num = 1; /* XXX??? 
*/
5203 cpi->hba_inquiry = 0;
5204 cpi->target_sprt = 0;
5205 cpi->hba_misc = 0;
5206 cpi->hba_eng_cnt = 0;
5207 cpi->max_target = 0;
5208 cpi->max_lun = 0;
5209 cpi->initiator_id = 0;
5210 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
5211 strlcpy(cpi->hba_vid, "", HBA_IDLEN);
5212 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
5213 cpi->unit_number = sim->unit_number;
5214 cpi->bus_id = sim->bus_id;
5215 cpi->base_transfer_speed = 0;
5216 cpi->protocol = PROTO_UNSPECIFIED;
5217 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
5218 cpi->transport = XPORT_UNSPECIFIED;
5219 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
5220 cpi->ccb_h.status = CAM_REQ_CMP;
5221 break;
5222 }
5223 default:
5224 work_ccb->ccb_h.status = CAM_REQ_INVALID;
5225 break;
5226 }
5227 xpt_done(work_ccb);
5228 }
5229
5230 /*
5231 * The xpt as a "controller" has no interrupt sources, so polling
5232 * is a no-op.
5233 */
5234 static void
5235 xptpoll(struct cam_sim *sim)
5236 {
5237 }
5238
5239 void
5240 xpt_lock_buses(void)
5241 {
5242 mtx_lock(&xsoftc.xpt_topo_lock);
5243 }
5244
5245 void
5246 xpt_unlock_buses(void)
5247 {
5248 mtx_unlock(&xsoftc.xpt_topo_lock);
5249 }
5250
5251 struct mtx *
5252 xpt_path_mtx(struct cam_path *path)
5253 {
5254
5255 return (&path->device->device_mtx);
5256 }
5257
5258 static void
5259 xpt_done_process(struct ccb_hdr *ccb_h)
5260 {
5261 struct cam_sim *sim = NULL;
5262 struct cam_devq *devq = NULL;
5263 struct mtx *mtx = NULL;
5264
5265 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5266 struct ccb_scsiio *csio;
5267
5268 if (ccb_h->func_code == XPT_SCSI_IO) {
5269 csio = &((union ccb *)ccb_h)->csio;
5270 if (csio->bio != NULL)
5271 biotrack(csio->bio, __func__);
5272 }
5273 #endif
5274
5275 if (ccb_h->flags & CAM_HIGH_POWER) {
5276 struct highpowerlist *hphead;
5277 struct cam_ed *device;
5278
5279 mtx_lock(&xsoftc.xpt_highpower_lock);
5280 hphead = &xsoftc.highpowerq;
5281
5282 device = STAILQ_FIRST(hphead);
5283
5284 /*
5285 * Increment the count since this command is done.
5286 */
5287 xsoftc.num_highpower++;
5288
5289 /*
5290 * Any high powered commands queued up?
5291 */
5292 if (device != NULL) {
5293 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry);
5294 mtx_unlock(&xsoftc.xpt_highpower_lock);
5295
5296 mtx_lock(&device->sim->devq->send_mtx);
5297 xpt_release_devq_device(device,
5298 /*count*/1, /*runqueue*/TRUE);
5299 mtx_unlock(&device->sim->devq->send_mtx);
5300 } else
5301 mtx_unlock(&xsoftc.xpt_highpower_lock);
5302 }
5303
5304 /*
5305 * Insulate against a race where the periph is destroyed but CCBs are
5306 * still not all processed. This shouldn't happen, but allows for better
5307 * bug diagnostics when it does.
5308 */ 5309 if (ccb_h->path->bus) 5310 sim = ccb_h->path->bus->sim; 5311 5312 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5313 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); 5314 xpt_release_simq(sim, /*run_queue*/FALSE); 5315 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5316 } 5317 5318 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5319 && (ccb_h->status & CAM_DEV_QFRZN)) { 5320 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5321 ccb_h->status &= ~CAM_DEV_QFRZN; 5322 } 5323 5324 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5325 struct cam_ed *dev = ccb_h->path->device; 5326 5327 if (sim) 5328 devq = sim->devq; 5329 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.", 5330 ccb_h, xpt_action_name(ccb_h->func_code))); 5331 5332 mtx_lock(&devq->send_mtx); 5333 devq->send_active--; 5334 devq->send_openings++; 5335 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5336 5337 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5338 && (dev->ccbq.dev_active == 0))) { 5339 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5340 xpt_release_devq_device(dev, /*count*/1, 5341 /*run_queue*/FALSE); 5342 } 5343 5344 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5345 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5346 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5347 xpt_release_devq_device(dev, /*count*/1, 5348 /*run_queue*/FALSE); 5349 } 5350 5351 if (!device_is_queued(dev)) 5352 (void)xpt_schedule_devq(devq, dev); 5353 xpt_run_devq(devq); 5354 mtx_unlock(&devq->send_mtx); 5355 5356 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5357 mtx = xpt_path_mtx(ccb_h->path); 5358 mtx_lock(mtx); 5359 5360 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5361 && (--dev->tag_delay_count == 0)) 5362 xpt_start_tags(ccb_h->path); 5363 } 5364 } 5365 5366 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5367 if (mtx == NULL) { 5368 mtx = xpt_path_mtx(ccb_h->path); 5369 mtx_lock(mtx); 5370 } 5371 } else { 5372 if (mtx != NULL) { 5373 mtx_unlock(mtx); 5374 mtx = NULL; 5375 } 5376 } 5377 5378 /* Call the peripheral driver's callback */ 5379 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5380 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5381 if (mtx != NULL) 5382 mtx_unlock(mtx); 5383 } 5384 5385 /* 5386 * Parameterize instead and use xpt_done_td? 
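 * For now xpt_async_td() is essentially xpt_done_td() specialized for the
 * dedicated async completion queue; unlike xpt_done_td() it does not set
 * cam_doneq_sleep and does not wrap its handlers in THREAD_NO_SLEEPING(),
 * so they remain free to sleep.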
5387 */
5388 static void
5389 xpt_async_td(void *arg)
5390 {
5391 struct cam_doneq *queue = arg;
5392 struct ccb_hdr *ccb_h;
5393 STAILQ_HEAD(, ccb_hdr) doneq;
5394
5395 STAILQ_INIT(&doneq);
5396 mtx_lock(&queue->cam_doneq_mtx);
5397 while (1) {
5398 while (STAILQ_EMPTY(&queue->cam_doneq))
5399 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5400 PRIBIO, "-", 0);
5401 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5402 mtx_unlock(&queue->cam_doneq_mtx);
5403
5404 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5405 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5406 xpt_done_process(ccb_h);
5407 }
5408
5409 mtx_lock(&queue->cam_doneq_mtx);
5410 }
5411 }
5412
5413 void
5414 xpt_done_td(void *arg)
5415 {
5416 struct cam_doneq *queue = arg;
5417 struct ccb_hdr *ccb_h;
5418 STAILQ_HEAD(, ccb_hdr) doneq;
5419
5420 STAILQ_INIT(&doneq);
5421 mtx_lock(&queue->cam_doneq_mtx);
5422 while (1) {
5423 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5424 queue->cam_doneq_sleep = 1;
5425 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5426 PRIBIO, "-", 0);
5427 queue->cam_doneq_sleep = 0;
5428 }
5429 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5430 mtx_unlock(&queue->cam_doneq_mtx);
5431
5432 THREAD_NO_SLEEPING();
5433 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5434 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5435 xpt_done_process(ccb_h);
5436 }
5437 THREAD_SLEEPING_OK();
5438
5439 mtx_lock(&queue->cam_doneq_mtx);
5440 }
5441 }
5442
5443 static void
5444 camisr_runqueue(void)
5445 {
5446 struct ccb_hdr *ccb_h;
5447 struct cam_doneq *queue;
5448 int i;
5449
5450 /* Process global queues. */
5451 for (i = 0; i < cam_num_doneqs; i++) {
5452 queue = &cam_doneqs[i];
5453 mtx_lock(&queue->cam_doneq_mtx);
5454 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5455 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5456 mtx_unlock(&queue->cam_doneq_mtx);
5457 xpt_done_process(ccb_h);
5458 mtx_lock(&queue->cam_doneq_mtx);
5459 }
5460 mtx_unlock(&queue->cam_doneq_mtx);
5461 }
5462 }
5463
5464 /**
5465 * @brief Return the device_t associated with the path
5466 *
5467 * When a SIM is created, it registers a bus with a NEWBUS device_t. This is
5468 * stored in the internal cam_eb bus structure. There is no guarantee any given
5469 * path will have a @c device_t associated with it (it's legal to call @c
5470 * xpt_bus_register with a @c NULL @c device_t).
5471 *
5472 * @param path Path to return the device_t for.
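 * @return The device_t passed to xpt_bus_register() for this path's bus,
 *	   or NULL if the bus was registered without one.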
5473 */ 5474 device_t 5475 xpt_path_sim_device(const struct cam_path *path) 5476 { 5477 return (path->bus->parent_dev); 5478 } 5479 5480 struct kv 5481 { 5482 uint32_t v; 5483 const char *name; 5484 }; 5485 5486 static struct kv map[] = { 5487 { XPT_NOOP, "XPT_NOOP" }, 5488 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5489 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5490 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5491 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5492 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5493 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5494 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5495 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5496 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5497 { XPT_DEBUG, "XPT_DEBUG" }, 5498 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5499 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5500 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5501 { XPT_ASYNC, "XPT_ASYNC" }, 5502 { XPT_ABORT, "XPT_ABORT" }, 5503 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5504 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5505 { XPT_TERM_IO, "XPT_TERM_IO" }, 5506 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5507 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5508 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5509 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5510 { XPT_ATA_IO, "XPT_ATA_IO" }, 5511 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5512 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5513 { XPT_NVME_IO, "XPT_NVME_IO" }, 5514 { XPT_MMC_IO, "XPT_MMC_IO" }, 5515 { XPT_SMP_IO, "XPT_SMP_IO" }, 5516 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5517 { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, 5518 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5519 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5520 { XPT_EN_LUN, "XPT_EN_LUN" }, 5521 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5522 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5523 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5524 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5525 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5526 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5527 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5528 { 0, 0 } 5529 }; 5530 5531 const char * 5532 xpt_action_name(uint32_t action) 5533 { 5534 static char buffer[32]; /* Only for unknown messages -- racy */ 5535 struct kv *walker = map; 5536 5537 while (walker->name != NULL) { 5538 if (walker->v == action) 5539 return (walker->name); 5540 walker++; 5541 } 5542 5543 snprintf(buffer, sizeof(buffer), "%#x", action); 5544 return (buffer); 5545 } 5546