1 /*- 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * SPDX-License-Identifier: BSD-2-Clause 5 * 6 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 7 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 8 * All rights reserved. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification, immediately at the beginning of the file. 16 * 2. The name of the author may not be used to endorse or promote products 17 * derived from this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 */ 31 32 #include "opt_printf.h" 33 34 #include <sys/param.h> 35 #include <sys/bio.h> 36 #include <sys/bus.h> 37 #include <sys/systm.h> 38 #include <sys/types.h> 39 #include <sys/malloc.h> 40 #include <sys/kernel.h> 41 #include <sys/time.h> 42 #include <sys/conf.h> 43 #include <sys/fcntl.h> 44 #include <sys/proc.h> 45 #include <sys/sbuf.h> 46 #include <sys/smp.h> 47 #include <sys/stdarg.h> 48 #include <sys/taskqueue.h> 49 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/sysctl.h> 53 #include <sys/kthread.h> 54 55 #include <cam/cam.h> 56 #include <cam/cam_ccb.h> 57 #include <cam/cam_iosched.h> 58 #include <cam/cam_periph.h> 59 #include <cam/cam_queue.h> 60 #include <cam/cam_sim.h> 61 #include <cam/cam_xpt.h> 62 #include <cam/cam_xpt_sim.h> 63 #include <cam/cam_xpt_periph.h> 64 #include <cam/cam_xpt_internal.h> 65 #include <cam/cam_debug.h> 66 #include <cam/cam_compat.h> 67 68 #include <cam/scsi/scsi_all.h> 69 #include <cam/scsi/scsi_message.h> 70 #include <cam/scsi/scsi_pass.h> 71 72 73 /* Wild guess based on not wanting to grow the stack too much */ 74 #define XPT_PRINT_MAXLEN 512 75 #ifdef PRINTF_BUFR_SIZE 76 #define XPT_PRINT_LEN PRINTF_BUFR_SIZE 77 #else 78 #define XPT_PRINT_LEN 128 79 #endif 80 _Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large"); 81 82 /* 83 * This is the maximum number of high powered commands (e.g. start unit) 84 * that can be outstanding at a particular time. 
85 */ 86 #ifndef CAM_MAX_HIGHPOWER 87 #define CAM_MAX_HIGHPOWER 4 88 #endif 89 90 /* Datastructures internal to the xpt layer */ 91 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); 92 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); 93 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); 94 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); 95 96 struct xpt_softc { 97 uint32_t xpt_generation; 98 99 /* number of high powered commands that can go through right now */ 100 struct mtx xpt_highpower_lock; 101 STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; 102 int num_highpower; 103 104 /* queue for handling async rescan requests. */ 105 TAILQ_HEAD(, ccb_hdr) ccb_scanq; 106 int buses_to_config; 107 int buses_config_done; 108 109 /* 110 * Registered buses 111 * 112 * N.B., "busses" is an archaic spelling of "buses". In new code 113 * "buses" is preferred. 114 */ 115 TAILQ_HEAD(,cam_eb) xpt_busses; 116 u_int bus_generation; 117 118 int boot_delay; 119 struct callout boot_callout; 120 struct task boot_task; 121 struct root_hold_token xpt_rootmount; 122 123 struct mtx xpt_topo_lock; 124 struct taskqueue *xpt_taskq; 125 }; 126 127 typedef enum { 128 DM_RET_COPY = 0x01, 129 DM_RET_FLAG_MASK = 0x0f, 130 DM_RET_NONE = 0x00, 131 DM_RET_STOP = 0x10, 132 DM_RET_DESCEND = 0x20, 133 DM_RET_ERROR = 0x30, 134 DM_RET_ACTION_MASK = 0xf0 135 } dev_match_ret; 136 137 typedef enum { 138 XPT_DEPTH_BUS, 139 XPT_DEPTH_TARGET, 140 XPT_DEPTH_DEVICE, 141 XPT_DEPTH_PERIPH 142 } xpt_traverse_depth; 143 144 struct xpt_traverse_config { 145 xpt_traverse_depth depth; 146 void *tr_func; 147 void *tr_arg; 148 }; 149 150 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); 151 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); 152 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); 153 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); 154 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); 155 156 /* Transport layer configuration information */ 157 static struct xpt_softc xsoftc; 158 159 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); 160 161 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, 162 &xsoftc.boot_delay, 0, "Bus registration wait time"); 163 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, 164 &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); 165 166 struct cam_doneq { 167 struct mtx_padalign cam_doneq_mtx; 168 STAILQ_HEAD(, ccb_hdr) cam_doneq; 169 int cam_doneq_sleep; 170 }; 171 172 static struct cam_doneq cam_doneqs[MAXCPU]; 173 static u_int __read_mostly cam_num_doneqs; 174 static struct proc *cam_proc; 175 static struct cam_doneq cam_async; 176 177 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, 178 &cam_num_doneqs, 0, "Number of completion queues/threads"); 179 180 struct cam_periph *xpt_periph; 181 182 static periph_init_t xpt_periph_init; 183 184 static struct periph_driver xpt_driver = 185 { 186 xpt_periph_init, "xpt", 187 TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, 188 CAM_PERIPH_DRV_EARLY 189 }; 190 191 PERIPHDRIVER_DECLARE(xpt, xpt_driver); 192 193 static d_open_t xptopen; 194 static d_close_t xptclose; 195 static d_ioctl_t xptioctl; 196 static d_ioctl_t xptdoioctl; 197 198 static struct cdevsw xpt_cdevsw = { 199 .d_version = D_VERSION, 200 .d_flags = 0, 201 .d_open = xptopen, 202 .d_close = xptclose, 203 .d_ioctl = xptioctl, 204 .d_name = "xpt", 205 }; 206 207 /* Storage for debugging datastructures */ 208 struct cam_path *cam_dpath; 209 
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS; 210 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, 211 &cam_dflags, 0, "Enabled debug flags"); 212 uint32_t cam_debug_delay = CAM_DEBUG_DELAY; 213 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, 214 &cam_debug_delay, 0, "Delay in us after each debug message"); 215 216 /* Our boot-time initialization hook */ 217 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); 218 219 static moduledata_t cam_moduledata = { 220 "cam", 221 cam_module_event_handler, 222 NULL 223 }; 224 225 static int xpt_init(void *); 226 227 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 228 MODULE_VERSION(cam, 1); 229 230 static void xpt_async_bcast(struct async_list *async_head, 231 uint32_t async_code, 232 struct cam_path *path, 233 void *async_arg); 234 static path_id_t xptnextfreepathid(void); 235 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); 236 static union ccb *xpt_get_ccb(struct cam_periph *periph); 237 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); 238 static void xpt_run_allocq(struct cam_periph *periph, int sleep); 239 static void xpt_run_allocq_task(void *context, int pending); 240 static void xpt_run_devq(struct cam_devq *devq); 241 static callout_func_t xpt_release_devq_timeout; 242 static void xpt_acquire_bus(struct cam_eb *bus); 243 static void xpt_release_bus(struct cam_eb *bus); 244 static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); 245 static int xpt_release_devq_device(struct cam_ed *dev, u_int count, 246 int run_queue); 247 static struct cam_et* 248 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); 249 static void xpt_acquire_target(struct cam_et *target); 250 static void xpt_release_target(struct cam_et *target); 251 static struct cam_eb* 252 xpt_find_bus(path_id_t path_id); 253 static struct cam_et* 254 xpt_find_target(struct cam_eb *bus, target_id_t target_id); 255 static struct cam_ed* 256 xpt_find_device(struct cam_et *target, lun_id_t lun_id); 257 static void xpt_config(void *arg); 258 static void xpt_hold_boot_locked(void); 259 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, 260 uint32_t new_priority); 261 static xpt_devicefunc_t xptpassannouncefunc; 262 static void xptaction(struct cam_sim *sim, union ccb *work_ccb); 263 static void xptpoll(struct cam_sim *sim); 264 static void camisr_runqueue(void); 265 static void xpt_done_process(struct ccb_hdr *ccb_h); 266 static void xpt_done_td(void *); 267 static void xpt_async_td(void *); 268 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, 269 u_int num_patterns, struct cam_eb *bus); 270 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, 271 u_int num_patterns, 272 struct cam_ed *device); 273 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, 274 u_int num_patterns, 275 struct cam_periph *periph); 276 static xpt_busfunc_t xptedtbusfunc; 277 static xpt_targetfunc_t xptedttargetfunc; 278 static xpt_devicefunc_t xptedtdevicefunc; 279 static xpt_periphfunc_t xptedtperiphfunc; 280 static xpt_pdrvfunc_t xptplistpdrvfunc; 281 static xpt_periphfunc_t xptplistperiphfunc; 282 static int xptedtmatch(struct ccb_dev_match *cdm); 283 static int xptperiphlistmatch(struct ccb_dev_match *cdm); 284 static int xptbustraverse(struct cam_eb *start_bus, 285 xpt_busfunc_t *tr_func, void *arg); 286 static int xpttargettraverse(struct cam_eb *bus, 287 struct cam_et *start_target, 288 
xpt_targetfunc_t *tr_func, void *arg); 289 static int xptdevicetraverse(struct cam_et *target, 290 struct cam_ed *start_device, 291 xpt_devicefunc_t *tr_func, void *arg); 292 static int xptperiphtraverse(struct cam_ed *device, 293 struct cam_periph *start_periph, 294 xpt_periphfunc_t *tr_func, void *arg); 295 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 296 xpt_pdrvfunc_t *tr_func, void *arg); 297 static int xptpdperiphtraverse(struct periph_driver **pdrv, 298 struct cam_periph *start_periph, 299 xpt_periphfunc_t *tr_func, 300 void *arg); 301 static xpt_busfunc_t xptdefbusfunc; 302 static xpt_targetfunc_t xptdeftargetfunc; 303 static xpt_devicefunc_t xptdefdevicefunc; 304 static xpt_periphfunc_t xptdefperiphfunc; 305 static void xpt_finishconfig_task(void *context, int pending); 306 static void xpt_dev_async_default(uint32_t async_code, 307 struct cam_eb *bus, 308 struct cam_et *target, 309 struct cam_ed *device, 310 void *async_arg); 311 static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, 312 struct cam_et *target, 313 lun_id_t lun_id); 314 static xpt_devicefunc_t xptsetasyncfunc; 315 static xpt_busfunc_t xptsetasyncbusfunc; 316 static cam_status xptregister(struct cam_periph *periph, 317 void *arg); 318 319 static __inline int 320 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) 321 { 322 int retval; 323 324 mtx_assert(&devq->send_mtx, MA_OWNED); 325 if ((dev->ccbq.queue.entries > 0) && 326 (dev->ccbq.dev_openings > 0) && 327 (dev->ccbq.queue.qfrozen_cnt == 0)) { 328 /* 329 * The priority of a device waiting for controller 330 * resources is that of the highest priority CCB 331 * enqueued. 332 */ 333 retval = 334 xpt_schedule_dev(&devq->send_queue, 335 &dev->devq_entry, 336 CAMQ_GET_PRIO(&dev->ccbq.queue)); 337 } else { 338 retval = 0; 339 } 340 return (retval); 341 } 342 343 static __inline int 344 device_is_queued(struct cam_ed *device) 345 { 346 return (device->devq_entry.index != CAM_UNQUEUED_INDEX); 347 } 348 349 static void 350 xpt_periph_init(void) 351 { 352 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 353 } 354 355 static int 356 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 357 { 358 359 /* 360 * Only allow read-write access. 361 */ 362 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 363 return(EPERM); 364 365 /* 366 * We don't allow nonblocking access. 367 */ 368 if ((flags & O_NONBLOCK) != 0) { 369 printf("%s: can't do nonblocking access\n", devtoname(dev)); 370 return(ENODEV); 371 } 372 373 return(0); 374 } 375 376 static int 377 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 378 { 379 380 return(0); 381 } 382 383 /* 384 * Don't automatically grab the xpt softc lock here even though this is going 385 * through the xpt device. The xpt device is really just a back door for 386 * accessing other devices and SIMs, so the right thing to do is to grab 387 * the appropriate SIM lock once the bus/SIM is located. 
388 */ 389 static int 390 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 391 { 392 int error; 393 394 if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { 395 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); 396 } 397 return (error); 398 } 399 400 static int 401 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 402 { 403 int error; 404 405 error = 0; 406 407 switch(cmd) { 408 /* 409 * For the transport layer CAMIOCOMMAND ioctl, we really only want 410 * to accept CCB types that don't quite make sense to send through a 411 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 412 * in the CAM spec. 413 */ 414 case CAMIOCOMMAND: { 415 union ccb *ccb; 416 union ccb *inccb; 417 struct cam_eb *bus; 418 419 inccb = (union ccb *)addr; 420 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 421 if (inccb->ccb_h.func_code == XPT_SCSI_IO) 422 inccb->csio.bio = NULL; 423 #endif 424 425 if (inccb->ccb_h.flags & CAM_UNLOCKED) 426 return (EINVAL); 427 428 bus = xpt_find_bus(inccb->ccb_h.path_id); 429 if (bus == NULL) 430 return (EINVAL); 431 432 switch (inccb->ccb_h.func_code) { 433 case XPT_SCAN_BUS: 434 case XPT_RESET_BUS: 435 if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || 436 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 437 xpt_release_bus(bus); 438 return (EINVAL); 439 } 440 break; 441 case XPT_SCAN_TGT: 442 if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || 443 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 444 xpt_release_bus(bus); 445 return (EINVAL); 446 } 447 break; 448 default: 449 break; 450 } 451 452 switch(inccb->ccb_h.func_code) { 453 case XPT_SCAN_BUS: 454 case XPT_RESET_BUS: 455 case XPT_PATH_INQ: 456 case XPT_ENG_INQ: 457 case XPT_SCAN_LUN: 458 case XPT_SCAN_TGT: 459 460 ccb = xpt_alloc_ccb(); 461 462 /* 463 * Create a path using the bus, target, and lun the 464 * user passed in. 465 */ 466 if (xpt_create_path(&ccb->ccb_h.path, NULL, 467 inccb->ccb_h.path_id, 468 inccb->ccb_h.target_id, 469 inccb->ccb_h.target_lun) != 470 CAM_REQ_CMP){ 471 error = EINVAL; 472 xpt_free_ccb(ccb); 473 break; 474 } 475 /* Ensure all of our fields are correct */ 476 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 477 inccb->ccb_h.pinfo.priority); 478 xpt_merge_ccb(ccb, inccb); 479 xpt_path_lock(ccb->ccb_h.path); 480 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 481 xpt_path_unlock(ccb->ccb_h.path); 482 bcopy(ccb, inccb, sizeof(union ccb)); 483 xpt_free_path(ccb->ccb_h.path); 484 xpt_free_ccb(ccb); 485 break; 486 487 case XPT_DEBUG: { 488 union ccb ccb; 489 490 /* 491 * This is an immediate CCB, so it's okay to 492 * allocate it on the stack. 493 */ 494 memset(&ccb, 0, sizeof(ccb)); 495 496 /* 497 * Create a path using the bus, target, and lun the 498 * user passed in. 499 */ 500 if (xpt_create_path(&ccb.ccb_h.path, NULL, 501 inccb->ccb_h.path_id, 502 inccb->ccb_h.target_id, 503 inccb->ccb_h.target_lun) != 504 CAM_REQ_CMP){ 505 error = EINVAL; 506 break; 507 } 508 /* Ensure all of our fields are correct */ 509 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 510 inccb->ccb_h.pinfo.priority); 511 xpt_merge_ccb(&ccb, inccb); 512 xpt_action(&ccb); 513 bcopy(&ccb, inccb, sizeof(union ccb)); 514 xpt_free_path(ccb.ccb_h.path); 515 break; 516 } 517 case XPT_DEV_MATCH: { 518 struct cam_periph_map_info mapinfo; 519 struct cam_path *old_path; 520 521 /* 522 * We can't deal with physical addresses for this 523 * type of transaction. 
524 */ 525 if ((inccb->ccb_h.flags & CAM_DATA_MASK) != 526 CAM_DATA_VADDR) { 527 error = EINVAL; 528 break; 529 } 530 531 /* 532 * Save this in case the caller had it set to 533 * something in particular. 534 */ 535 old_path = inccb->ccb_h.path; 536 537 /* 538 * We really don't need a path for the matching 539 * code. The path is needed because of the 540 * debugging statements in xpt_action(). They 541 * assume that the CCB has a valid path. 542 */ 543 inccb->ccb_h.path = xpt_periph->path; 544 545 bzero(&mapinfo, sizeof(mapinfo)); 546 547 /* 548 * Map the pattern and match buffers into kernel 549 * virtual address space. 550 */ 551 error = cam_periph_mapmem(inccb, &mapinfo, maxphys); 552 553 if (error) { 554 inccb->ccb_h.path = old_path; 555 break; 556 } 557 558 /* 559 * This is an immediate CCB, we can send it on directly. 560 */ 561 xpt_action(inccb); 562 563 /* 564 * Map the buffers back into user space. 565 */ 566 error = cam_periph_unmapmem(inccb, &mapinfo); 567 568 inccb->ccb_h.path = old_path; 569 break; 570 } 571 default: 572 error = ENOTSUP; 573 break; 574 } 575 xpt_release_bus(bus); 576 break; 577 } 578 /* 579 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 580 * with the periphal driver name and unit name filled in. The other 581 * fields don't really matter as input. The passthrough driver name 582 * ("pass"), and unit number are passed back in the ccb. The current 583 * device generation number, and the index into the device peripheral 584 * driver list, and the status are also passed back. Note that 585 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 586 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 587 * (or rather should be) impossible for the device peripheral driver 588 * list to change since we look at the whole thing in one pass, and 589 * we do it with lock protection. 590 * 591 */ 592 case CAMGETPASSTHRU: { 593 union ccb *ccb; 594 struct cam_periph *periph; 595 struct periph_driver **p_drv; 596 char *name; 597 u_int unit; 598 bool base_periph_found; 599 600 ccb = (union ccb *)addr; 601 unit = ccb->cgdl.unit_number; 602 name = ccb->cgdl.periph_name; 603 base_periph_found = false; 604 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 605 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 606 ccb->csio.bio = NULL; 607 #endif 608 609 /* 610 * Sanity check -- make sure we don't get a null peripheral 611 * driver name. 612 */ 613 if (*ccb->cgdl.periph_name == '\0') { 614 error = EINVAL; 615 break; 616 } 617 618 /* Keep the list from changing while we traverse it */ 619 xpt_lock_buses(); 620 621 /* first find our driver in the list of drivers */ 622 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 623 if (strcmp((*p_drv)->driver_name, name) == 0) 624 break; 625 626 if (*p_drv == NULL) { 627 xpt_unlock_buses(); 628 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 629 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 630 *ccb->cgdl.periph_name = '\0'; 631 ccb->cgdl.unit_number = 0; 632 error = ENOENT; 633 break; 634 } 635 636 /* 637 * Run through every peripheral instance of this driver 638 * and check to see whether it matches the unit passed 639 * in by the user. If it does, get out of the loops and 640 * find the passthrough driver associated with that 641 * peripheral driver. 
642 */ 643 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 644 periph = TAILQ_NEXT(periph, unit_links)) { 645 if (periph->unit_number == unit) 646 break; 647 } 648 /* 649 * If we found the peripheral driver that the user passed 650 * in, go through all of the peripheral drivers for that 651 * particular device and look for a passthrough driver. 652 */ 653 if (periph != NULL) { 654 struct cam_ed *device; 655 int i; 656 657 base_periph_found = true; 658 device = periph->path->device; 659 for (i = 0, periph = SLIST_FIRST(&device->periphs); 660 periph != NULL; 661 periph = SLIST_NEXT(periph, periph_links), i++) { 662 /* 663 * Check to see whether we have a 664 * passthrough device or not. 665 */ 666 if (strcmp(periph->periph_name, "pass") == 0) { 667 /* 668 * Fill in the getdevlist fields. 669 */ 670 strlcpy(ccb->cgdl.periph_name, 671 periph->periph_name, 672 sizeof(ccb->cgdl.periph_name)); 673 ccb->cgdl.unit_number = 674 periph->unit_number; 675 if (SLIST_NEXT(periph, periph_links)) 676 ccb->cgdl.status = 677 CAM_GDEVLIST_MORE_DEVS; 678 else 679 ccb->cgdl.status = 680 CAM_GDEVLIST_LAST_DEVICE; 681 ccb->cgdl.generation = 682 device->generation; 683 ccb->cgdl.index = i; 684 /* 685 * Fill in some CCB header fields 686 * that the user may want. 687 */ 688 ccb->ccb_h.path_id = 689 periph->path->bus->path_id; 690 ccb->ccb_h.target_id = 691 periph->path->target->target_id; 692 ccb->ccb_h.target_lun = 693 periph->path->device->lun_id; 694 ccb->ccb_h.status = CAM_REQ_CMP; 695 break; 696 } 697 } 698 } 699 700 /* 701 * If the periph is null here, one of two things has 702 * happened. The first possibility is that we couldn't 703 * find the unit number of the particular peripheral driver 704 * that the user is asking about. e.g. the user asks for 705 * the passthrough driver for "da11". We find the list of 706 * "da" peripherals all right, but there is no unit 11. 707 * The other possibility is that we went through the list 708 * of peripheral drivers attached to the device structure, 709 * but didn't find one with the name "pass". Either way, 710 * we return ENOENT, since we couldn't find something. 711 */ 712 if (periph == NULL) { 713 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 714 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 715 *ccb->cgdl.periph_name = '\0'; 716 ccb->cgdl.unit_number = 0; 717 error = ENOENT; 718 /* 719 * It is unfortunate that this is even necessary, 720 * but there are many, many clueless users out there. 721 * If this is true, the user is looking for the 722 * passthrough driver, but doesn't have one in his 723 * kernel. 
724 */ 725 if (base_periph_found) { 726 printf( 727 "xptioctl: pass driver is not in the kernel\n" 728 "xptioctl: put \"device pass\" in your kernel config file\n"); 729 } 730 } 731 xpt_unlock_buses(); 732 break; 733 } 734 default: 735 error = ENOTTY; 736 break; 737 } 738 739 return(error); 740 } 741 742 static int 743 cam_module_event_handler(module_t mod, int what, void *arg) 744 { 745 int error; 746 747 switch (what) { 748 case MOD_LOAD: 749 if ((error = xpt_init(NULL)) != 0) 750 return (error); 751 break; 752 case MOD_UNLOAD: 753 return EBUSY; 754 default: 755 return EOPNOTSUPP; 756 } 757 758 return 0; 759 } 760 761 static struct xpt_proto * 762 xpt_proto_find(cam_proto proto) 763 { 764 struct xpt_proto **pp; 765 766 SET_FOREACH(pp, cam_xpt_proto_set) { 767 if ((*pp)->proto == proto) 768 return *pp; 769 } 770 771 return NULL; 772 } 773 774 static void 775 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 776 { 777 778 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 779 xpt_free_path(done_ccb->ccb_h.path); 780 xpt_free_ccb(done_ccb); 781 } else { 782 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 783 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 784 } 785 xpt_release_boot(); 786 } 787 788 /* thread to handle bus rescans */ 789 static void 790 xpt_scanner_thread(void *dummy) 791 { 792 union ccb *ccb; 793 struct mtx *mtx; 794 struct cam_ed *device; 795 796 xpt_lock_buses(); 797 for (;;) { 798 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 799 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 800 "-", 0); 801 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 802 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 803 xpt_unlock_buses(); 804 805 /* 806 * We need to lock the device's mutex which we use as 807 * the path mutex. We can't do it directly because the 808 * cam_path in the ccb may wind up going away because 809 * the path lock may be dropped and the path retired in 810 * the completion callback. We do this directly to keep 811 * the reference counts in cam_path sane. We also have 812 * to copy the device pointer because ccb_h.path may 813 * be freed in the callback. 814 */ 815 mtx = xpt_path_mtx(ccb->ccb_h.path); 816 device = ccb->ccb_h.path->device; 817 xpt_acquire_device(device); 818 mtx_lock(mtx); 819 xpt_action(ccb); 820 mtx_unlock(mtx); 821 xpt_release_device(device); 822 823 xpt_lock_buses(); 824 } 825 } 826 } 827 828 void 829 xpt_rescan(union ccb *ccb) 830 { 831 struct ccb_hdr *hdr; 832 833 /* Prepare request */ 834 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 835 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 836 ccb->ccb_h.func_code = XPT_SCAN_BUS; 837 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 838 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 839 ccb->ccb_h.func_code = XPT_SCAN_TGT; 840 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 841 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 842 ccb->ccb_h.func_code = XPT_SCAN_LUN; 843 else { 844 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 845 xpt_free_path(ccb->ccb_h.path); 846 xpt_free_ccb(ccb); 847 return; 848 } 849 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 850 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 851 xpt_action_name(ccb->ccb_h.func_code))); 852 853 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 854 ccb->ccb_h.cbfcnp = xpt_rescan_done; 855 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 856 /* Don't make duplicate entries for the same paths. 
*/ 857 xpt_lock_buses(); 858 if (ccb->ccb_h.ppriv_ptr1 == NULL) { 859 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { 860 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { 861 wakeup(&xsoftc.ccb_scanq); 862 xpt_unlock_buses(); 863 xpt_print(ccb->ccb_h.path, "rescan already queued\n"); 864 xpt_free_path(ccb->ccb_h.path); 865 xpt_free_ccb(ccb); 866 return; 867 } 868 } 869 } 870 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 871 xpt_hold_boot_locked(); 872 wakeup(&xsoftc.ccb_scanq); 873 xpt_unlock_buses(); 874 } 875 876 /* Functions accessed by the peripheral drivers */ 877 static int 878 xpt_init(void *dummy) 879 { 880 struct cam_sim *xpt_sim; 881 struct cam_path *path; 882 struct cam_devq *devq; 883 cam_status status; 884 int error, i; 885 886 TAILQ_INIT(&xsoftc.xpt_busses); 887 TAILQ_INIT(&xsoftc.ccb_scanq); 888 STAILQ_INIT(&xsoftc.highpowerq); 889 xsoftc.num_highpower = CAM_MAX_HIGHPOWER; 890 891 mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); 892 xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, 893 taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); 894 895 #ifdef CAM_BOOT_DELAY 896 /* 897 * Override this value at compile time to assist our users 898 * who don't use loader to boot a kernel. 899 */ 900 xsoftc.boot_delay = CAM_BOOT_DELAY; 901 #endif 902 903 /* 904 * The xpt layer is, itself, the equivalent of a SIM. 905 * Allow 16 ccbs in the ccb pool for it. This should 906 * give decent parallelism when we probe buses and 907 * perform other XPT functions. 908 */ 909 devq = cam_simq_alloc(16); 910 if (devq == NULL) 911 return (ENOMEM); 912 xpt_sim = cam_sim_alloc(xptaction, 913 xptpoll, 914 "xpt", 915 /*softc*/NULL, 916 /*unit*/0, 917 /*mtx*/NULL, 918 /*max_dev_transactions*/0, 919 /*max_tagged_dev_transactions*/0, 920 devq); 921 if (xpt_sim == NULL) 922 return (ENOMEM); 923 924 if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { 925 printf( 926 "xpt_init: xpt_bus_register failed with errno %d, failing attach\n", 927 error); 928 return (EINVAL); 929 } 930 931 /* 932 * Looking at the XPT from the SIM layer, the XPT is 933 * the equivalent of a peripheral driver. Allocate 934 * a peripheral driver entry for us. 
935 */ 936 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 937 CAM_TARGET_WILDCARD, 938 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 939 printf( 940 "xpt_init: xpt_create_path failed with status %#x, failing attach\n", 941 status); 942 return (EINVAL); 943 } 944 xpt_path_lock(path); 945 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 946 path, NULL, 0, xpt_sim); 947 xpt_path_unlock(path); 948 xpt_free_path(path); 949 950 if (cam_num_doneqs < 1) 951 cam_num_doneqs = 1 + mp_ncpus / 6; 952 else if (cam_num_doneqs > MAXCPU) 953 cam_num_doneqs = MAXCPU; 954 for (i = 0; i < cam_num_doneqs; i++) { 955 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, 956 MTX_DEF); 957 STAILQ_INIT(&cam_doneqs[i].cam_doneq); 958 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], 959 &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); 960 if (error != 0) { 961 cam_num_doneqs = i; 962 break; 963 } 964 } 965 if (cam_num_doneqs < 1) { 966 printf("xpt_init: Cannot init completion queues - failing attach\n"); 967 return (ENOMEM); 968 } 969 970 mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF); 971 STAILQ_INIT(&cam_async.cam_doneq); 972 if (kproc_kthread_add(xpt_async_td, &cam_async, 973 &cam_proc, NULL, 0, 0, "cam", "async") != 0) { 974 printf("xpt_init: Cannot init async thread - failing attach\n"); 975 return (ENOMEM); 976 } 977 978 /* 979 * Register a callback for when interrupts are enabled. 980 */ 981 config_intrhook_oneshot(xpt_config, NULL); 982 983 return (0); 984 } 985 986 static cam_status 987 xptregister(struct cam_periph *periph, void *arg) 988 { 989 struct cam_sim *xpt_sim; 990 991 if (periph == NULL) { 992 printf("xptregister: periph was NULL!!\n"); 993 return(CAM_REQ_CMP_ERR); 994 } 995 996 xpt_sim = (struct cam_sim *)arg; 997 xpt_sim->softc = periph; 998 xpt_periph = periph; 999 periph->softc = NULL; 1000 1001 return(CAM_REQ_CMP); 1002 } 1003 1004 int32_t 1005 xpt_add_periph(struct cam_periph *periph) 1006 { 1007 struct cam_ed *device; 1008 int32_t status; 1009 1010 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 1011 device = periph->path->device; 1012 status = CAM_REQ_CMP; 1013 if (device != NULL) { 1014 mtx_lock(&device->target->bus->eb_mtx); 1015 device->generation++; 1016 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 1017 mtx_unlock(&device->target->bus->eb_mtx); 1018 atomic_add_32(&xsoftc.xpt_generation, 1); 1019 } 1020 1021 return (status); 1022 } 1023 1024 void 1025 xpt_remove_periph(struct cam_periph *periph) 1026 { 1027 struct cam_ed *device; 1028 1029 device = periph->path->device; 1030 if (device != NULL) { 1031 mtx_lock(&device->target->bus->eb_mtx); 1032 device->generation++; 1033 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1034 mtx_unlock(&device->target->bus->eb_mtx); 1035 atomic_add_32(&xsoftc.xpt_generation, 1); 1036 } 1037 } 1038 1039 void 1040 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1041 { 1042 char buf[128]; 1043 struct sbuf sb; 1044 1045 (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 1046 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 1047 xpt_announce_periph_sbuf(periph, &sb, announce_string); 1048 (void)sbuf_finish(&sb); 1049 (void)sbuf_delete(&sb); 1050 } 1051 1052 void 1053 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, 1054 char *announce_string) 1055 { 1056 struct cam_path *path = periph->path; 1057 struct xpt_proto *proto; 1058 1059 cam_periph_assert(periph, MA_OWNED); 1060 periph->flags |= 
CAM_PERIPH_ANNOUNCED; 1061 1062 sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1063 periph->periph_name, periph->unit_number, 1064 path->bus->sim->sim_name, 1065 path->bus->sim->unit_number, 1066 path->bus->sim->bus_id, 1067 path->bus->path_id, 1068 path->target->target_id, 1069 (uintmax_t)path->device->lun_id); 1070 sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); 1071 proto = xpt_proto_find(path->device->protocol); 1072 if (proto) 1073 proto->ops->announce_sbuf(path->device, sb); 1074 else 1075 sbuf_printf(sb, "Unknown protocol device %d\n", 1076 path->device->protocol); 1077 if (path->device->serial_num_len > 0) { 1078 /* Don't wrap the screen - print only the first 60 chars */ 1079 sbuf_printf(sb, "%s%d: Serial Number %.60s\n", 1080 periph->periph_name, periph->unit_number, 1081 path->device->serial_num); 1082 } 1083 /* Announce transport details. */ 1084 path->bus->xport->ops->announce_sbuf(periph, sb); 1085 /* Announce command queueing. */ 1086 if (path->device->inq_flags & SID_CmdQue 1087 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1088 sbuf_printf(sb, "%s%d: Command Queueing enabled\n", 1089 periph->periph_name, periph->unit_number); 1090 } 1091 /* Announce caller's details if they've passed in. */ 1092 if (announce_string != NULL) 1093 sbuf_printf(sb, "%s%d: %s\n", periph->periph_name, 1094 periph->unit_number, announce_string); 1095 } 1096 1097 void 1098 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) 1099 { 1100 if (quirks != 0) { 1101 printf("%s%d: quirks=0x%b\n", periph->periph_name, 1102 periph->unit_number, quirks, bit_string); 1103 } 1104 } 1105 1106 void 1107 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, 1108 int quirks, char *bit_string) 1109 { 1110 if (quirks != 0) { 1111 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name, 1112 periph->unit_number, quirks, bit_string); 1113 } 1114 } 1115 1116 void 1117 xpt_denounce_periph(struct cam_periph *periph) 1118 { 1119 char buf[128]; 1120 struct sbuf sb; 1121 1122 (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 1123 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 1124 xpt_denounce_periph_sbuf(periph, &sb); 1125 (void)sbuf_finish(&sb); 1126 (void)sbuf_delete(&sb); 1127 } 1128 1129 void 1130 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) 1131 { 1132 struct cam_path *path = periph->path; 1133 struct xpt_proto *proto; 1134 1135 cam_periph_assert(periph, MA_OWNED); 1136 1137 sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1138 periph->periph_name, periph->unit_number, 1139 path->bus->sim->sim_name, 1140 path->bus->sim->unit_number, 1141 path->bus->sim->bus_id, 1142 path->bus->path_id, 1143 path->target->target_id, 1144 (uintmax_t)path->device->lun_id); 1145 sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); 1146 proto = xpt_proto_find(path->device->protocol); 1147 if (proto) 1148 proto->ops->denounce_sbuf(path->device, sb); 1149 else 1150 sbuf_printf(sb, "Unknown protocol device %d", 1151 path->device->protocol); 1152 if (path->device->serial_num_len > 0) 1153 sbuf_printf(sb, " s/n %.60s", path->device->serial_num); 1154 sbuf_cat(sb, " detached\n"); 1155 } 1156 1157 int 1158 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) 1159 { 1160 int ret = -1, l, o; 1161 struct ccb_dev_advinfo cdai; 1162 struct scsi_vpd_device_id *did; 1163 struct scsi_vpd_id_descriptor *idd; 1164 1165 xpt_path_assert(path, MA_OWNED); 1166 1167 
memset(&cdai, 0, sizeof(cdai)); 1168 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 1169 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 1170 cdai.flags = CDAI_FLAG_NONE; 1171 cdai.bufsiz = len; 1172 cdai.buf = buf; 1173 1174 if (!strcmp(attr, "GEOM::ident")) 1175 cdai.buftype = CDAI_TYPE_SERIAL_NUM; 1176 else if (!strcmp(attr, "GEOM::physpath")) 1177 cdai.buftype = CDAI_TYPE_PHYS_PATH; 1178 else if (strcmp(attr, "GEOM::lunid") == 0 || 1179 strcmp(attr, "GEOM::lunname") == 0) { 1180 cdai.buftype = CDAI_TYPE_SCSI_DEVID; 1181 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; 1182 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT); 1183 if (cdai.buf == NULL) { 1184 ret = ENOMEM; 1185 goto out; 1186 } 1187 } else 1188 goto out; 1189 1190 xpt_action((union ccb *)&cdai); /* can only be synchronous */ 1191 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 1192 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 1193 if (cdai.provsiz == 0) 1194 goto out; 1195 switch(cdai.buftype) { 1196 case CDAI_TYPE_SCSI_DEVID: 1197 did = (struct scsi_vpd_device_id *)cdai.buf; 1198 if (strcmp(attr, "GEOM::lunid") == 0) { 1199 idd = scsi_get_devid(did, cdai.provsiz, 1200 scsi_devid_is_lun_naa); 1201 if (idd == NULL) 1202 idd = scsi_get_devid(did, cdai.provsiz, 1203 scsi_devid_is_lun_eui64); 1204 if (idd == NULL) 1205 idd = scsi_get_devid(did, cdai.provsiz, 1206 scsi_devid_is_lun_uuid); 1207 if (idd == NULL) 1208 idd = scsi_get_devid(did, cdai.provsiz, 1209 scsi_devid_is_lun_md5); 1210 } else 1211 idd = NULL; 1212 1213 if (idd == NULL) 1214 idd = scsi_get_devid(did, cdai.provsiz, 1215 scsi_devid_is_lun_t10); 1216 if (idd == NULL) 1217 idd = scsi_get_devid(did, cdai.provsiz, 1218 scsi_devid_is_lun_name); 1219 if (idd == NULL) 1220 break; 1221 1222 ret = 0; 1223 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == 1224 SVPD_ID_CODESET_ASCII) { 1225 if (idd->length < len) { 1226 for (l = 0; l < idd->length; l++) 1227 buf[l] = idd->identifier[l] ? 1228 idd->identifier[l] : ' '; 1229 buf[l] = 0; 1230 } else 1231 ret = EFAULT; 1232 break; 1233 } 1234 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == 1235 SVPD_ID_CODESET_UTF8) { 1236 l = strnlen(idd->identifier, idd->length); 1237 if (l < len) { 1238 bcopy(idd->identifier, buf, l); 1239 buf[l] = 0; 1240 } else 1241 ret = EFAULT; 1242 break; 1243 } 1244 if ((idd->id_type & SVPD_ID_TYPE_MASK) == 1245 SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) { 1246 if ((idd->length - 2) * 2 + 4 >= len) { 1247 ret = EFAULT; 1248 break; 1249 } 1250 for (l = 2, o = 0; l < idd->length; l++) { 1251 if (l == 6 || l == 8 || l == 10 || l == 12) 1252 o += sprintf(buf + o, "-"); 1253 o += sprintf(buf + o, "%02x", 1254 idd->identifier[l]); 1255 } 1256 break; 1257 } 1258 if (idd->length * 2 < len) { 1259 for (l = 0; l < idd->length; l++) 1260 sprintf(buf + l * 2, "%02x", 1261 idd->identifier[l]); 1262 } else 1263 ret = EFAULT; 1264 break; 1265 default: 1266 if (cdai.provsiz < len) { 1267 cdai.buf[cdai.provsiz] = 0; 1268 ret = 0; 1269 } else 1270 ret = EFAULT; 1271 break; 1272 } 1273 1274 out: 1275 if ((char *)cdai.buf != buf) 1276 free(cdai.buf, M_CAMXPT); 1277 return ret; 1278 } 1279 1280 static dev_match_ret 1281 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1282 struct cam_eb *bus) 1283 { 1284 dev_match_ret retval; 1285 u_int i; 1286 1287 retval = DM_RET_NONE; 1288 1289 /* 1290 * If we aren't given something to match against, that's an error. 
1291 */ 1292 if (bus == NULL) 1293 return(DM_RET_ERROR); 1294 1295 /* 1296 * If there are no match entries, then this bus matches no 1297 * matter what. 1298 */ 1299 if ((patterns == NULL) || (num_patterns == 0)) 1300 return(DM_RET_DESCEND | DM_RET_COPY); 1301 1302 for (i = 0; i < num_patterns; i++) { 1303 struct bus_match_pattern *cur_pattern; 1304 struct device_match_pattern *dp = &patterns[i].pattern.device_pattern; 1305 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern; 1306 1307 /* 1308 * If the pattern in question isn't for a bus node, we 1309 * aren't interested. However, we do indicate to the 1310 * calling routine that we should continue descending the 1311 * tree, since the user wants to match against lower-level 1312 * EDT elements. 1313 */ 1314 if (patterns[i].type == DEV_MATCH_DEVICE && 1315 (dp->flags & DEV_MATCH_PATH) != 0 && 1316 dp->path_id != bus->path_id) 1317 continue; 1318 if (patterns[i].type == DEV_MATCH_PERIPH && 1319 (pp->flags & PERIPH_MATCH_PATH) != 0 && 1320 pp->path_id != bus->path_id) 1321 continue; 1322 if (patterns[i].type != DEV_MATCH_BUS) { 1323 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1324 retval |= DM_RET_DESCEND; 1325 continue; 1326 } 1327 1328 cur_pattern = &patterns[i].pattern.bus_pattern; 1329 1330 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1331 && (cur_pattern->path_id != bus->path_id)) 1332 continue; 1333 1334 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1335 && (cur_pattern->bus_id != bus->sim->bus_id)) 1336 continue; 1337 1338 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1339 && (cur_pattern->unit_number != bus->sim->unit_number)) 1340 continue; 1341 1342 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1343 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1344 DEV_IDLEN) != 0)) 1345 continue; 1346 1347 /* 1348 * If we get to this point, the user definitely wants 1349 * information on this bus. So tell the caller to copy the 1350 * data out. 1351 */ 1352 retval |= DM_RET_COPY; 1353 1354 /* 1355 * If the return action has been set to descend, then we 1356 * know that we've already seen a non-bus matching 1357 * expression, therefore we need to further descend the tree. 1358 * This won't change by continuing around the loop, so we 1359 * go ahead and return. If we haven't seen a non-bus 1360 * matching expression, we keep going around the loop until 1361 * we exhaust the matching expressions. We'll set the stop 1362 * flag once we fall out of the loop. 1363 */ 1364 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1365 return(retval); 1366 } 1367 1368 /* 1369 * If the return action hasn't been set to descend yet, that means 1370 * we haven't seen anything other than bus matching patterns. So 1371 * tell the caller to stop descending the tree -- the user doesn't 1372 * want to match against lower level tree elements. 1373 */ 1374 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1375 retval |= DM_RET_STOP; 1376 1377 return(retval); 1378 } 1379 1380 static dev_match_ret 1381 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1382 struct cam_ed *device) 1383 { 1384 dev_match_ret retval; 1385 u_int i; 1386 1387 retval = DM_RET_NONE; 1388 1389 /* 1390 * If we aren't given something to match against, that's an error. 1391 */ 1392 if (device == NULL) 1393 return(DM_RET_ERROR); 1394 1395 /* 1396 * If there are no match entries, then this device matches no 1397 * matter what. 
1398 */ 1399 if ((patterns == NULL) || (num_patterns == 0)) 1400 return(DM_RET_DESCEND | DM_RET_COPY); 1401 1402 for (i = 0; i < num_patterns; i++) { 1403 struct device_match_pattern *cur_pattern; 1404 struct scsi_vpd_device_id *device_id_page; 1405 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern; 1406 1407 /* 1408 * If the pattern in question isn't for a device node, we 1409 * aren't interested. 1410 */ 1411 if (patterns[i].type == DEV_MATCH_PERIPH && 1412 (pp->flags & PERIPH_MATCH_TARGET) != 0 && 1413 pp->target_id != device->target->target_id) 1414 continue; 1415 if (patterns[i].type == DEV_MATCH_PERIPH && 1416 (pp->flags & PERIPH_MATCH_LUN) != 0 && 1417 pp->target_lun != device->lun_id) 1418 continue; 1419 if (patterns[i].type != DEV_MATCH_DEVICE) { 1420 if ((patterns[i].type == DEV_MATCH_PERIPH) 1421 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1422 retval |= DM_RET_DESCEND; 1423 continue; 1424 } 1425 1426 cur_pattern = &patterns[i].pattern.device_pattern; 1427 1428 /* Error out if mutually exclusive options are specified. */ 1429 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1430 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1431 return(DM_RET_ERROR); 1432 1433 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1434 && (cur_pattern->path_id != device->target->bus->path_id)) 1435 continue; 1436 1437 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1438 && (cur_pattern->target_id != device->target->target_id)) 1439 continue; 1440 1441 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1442 && (cur_pattern->target_lun != device->lun_id)) 1443 continue; 1444 1445 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1446 && (cam_quirkmatch((caddr_t)&device->inq_data, 1447 (caddr_t)&cur_pattern->data.inq_pat, 1448 1, sizeof(cur_pattern->data.inq_pat), 1449 scsi_static_inquiry_match) == NULL)) 1450 continue; 1451 1452 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1453 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1454 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1455 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1456 device->device_id_len 1457 - SVPD_DEVICE_ID_HDR_LEN, 1458 cur_pattern->data.devid_pat.id, 1459 cur_pattern->data.devid_pat.id_len) != 0)) 1460 continue; 1461 1462 /* 1463 * If we get to this point, the user definitely wants 1464 * information on this device. So tell the caller to copy 1465 * the data out. 1466 */ 1467 retval |= DM_RET_COPY; 1468 1469 /* 1470 * If the return action has been set to descend, then we 1471 * know that we've already seen a peripheral matching 1472 * expression, therefore we need to further descend the tree. 1473 * This won't change by continuing around the loop, so we 1474 * go ahead and return. If we haven't seen a peripheral 1475 * matching expression, we keep going around the loop until 1476 * we exhaust the matching expressions. We'll set the stop 1477 * flag once we fall out of the loop. 1478 */ 1479 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1480 return(retval); 1481 } 1482 1483 /* 1484 * If the return action hasn't been set to descend yet, that means 1485 * we haven't seen any peripheral matching patterns. So tell the 1486 * caller to stop descending the tree -- the user doesn't want to 1487 * match against lower level tree elements. 1488 */ 1489 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1490 retval |= DM_RET_STOP; 1491 1492 return(retval); 1493 } 1494 1495 /* 1496 * Match a single peripheral against any number of match patterns. 
1497 */ 1498 static dev_match_ret 1499 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1500 struct cam_periph *periph) 1501 { 1502 dev_match_ret retval; 1503 u_int i; 1504 1505 /* 1506 * If we aren't given something to match against, that's an error. 1507 */ 1508 if (periph == NULL) 1509 return(DM_RET_ERROR); 1510 1511 /* 1512 * If there are no match entries, then this peripheral matches no 1513 * matter what. 1514 */ 1515 if ((patterns == NULL) || (num_patterns == 0)) 1516 return(DM_RET_STOP | DM_RET_COPY); 1517 1518 /* 1519 * There aren't any nodes below a peripheral node, so there's no 1520 * reason to descend the tree any further. 1521 */ 1522 retval = DM_RET_STOP; 1523 1524 for (i = 0; i < num_patterns; i++) { 1525 struct periph_match_pattern *cur_pattern; 1526 1527 /* 1528 * If the pattern in question isn't for a peripheral, we 1529 * aren't interested. 1530 */ 1531 if (patterns[i].type != DEV_MATCH_PERIPH) 1532 continue; 1533 1534 cur_pattern = &patterns[i].pattern.periph_pattern; 1535 1536 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1537 && (cur_pattern->path_id != periph->path->bus->path_id)) 1538 continue; 1539 1540 /* 1541 * For the target and lun id's, we have to make sure the 1542 * target and lun pointers aren't NULL. The xpt peripheral 1543 * has a wildcard target and device. 1544 */ 1545 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1546 && ((periph->path->target == NULL) 1547 ||(cur_pattern->target_id != periph->path->target->target_id))) 1548 continue; 1549 1550 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1551 && ((periph->path->device == NULL) 1552 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1553 continue; 1554 1555 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1556 && (cur_pattern->unit_number != periph->unit_number)) 1557 continue; 1558 1559 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1560 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1561 DEV_IDLEN) != 0)) 1562 continue; 1563 1564 /* 1565 * If we get to this point, the user definitely wants 1566 * information on this peripheral. So tell the caller to 1567 * copy the data out. 1568 */ 1569 retval |= DM_RET_COPY; 1570 1571 /* 1572 * The return action has already been set to stop, since 1573 * peripherals don't have any nodes below them in the EDT. 1574 */ 1575 return(retval); 1576 } 1577 1578 /* 1579 * If we get to this point, the peripheral that was passed in 1580 * doesn't match any of the patterns. 1581 */ 1582 return(retval); 1583 } 1584 1585 static int 1586 xptedtbusfunc(struct cam_eb *bus, void *arg) 1587 { 1588 struct ccb_dev_match *cdm; 1589 struct cam_et *target; 1590 dev_match_ret retval; 1591 1592 cdm = (struct ccb_dev_match *)arg; 1593 1594 /* 1595 * If our position is for something deeper in the tree, that means 1596 * that we've already seen this node. So, we keep going down. 1597 */ 1598 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1599 && (cdm->pos.cookie.bus == bus) 1600 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1601 && (cdm->pos.cookie.target != NULL)) 1602 retval = DM_RET_DESCEND; 1603 else 1604 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1605 1606 /* 1607 * If we got an error, bail out of the search. 1608 */ 1609 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1610 cdm->status = CAM_DEV_MATCH_ERROR; 1611 return(0); 1612 } 1613 1614 /* 1615 * If the copy flag is set, copy this bus out. 
1616 */ 1617 if (retval & DM_RET_COPY) { 1618 int spaceleft, j; 1619 1620 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1621 sizeof(struct dev_match_result)); 1622 1623 /* 1624 * If we don't have enough space to put in another 1625 * match result, save our position and tell the 1626 * user there are more devices to check. 1627 */ 1628 if (spaceleft < sizeof(struct dev_match_result)) { 1629 bzero(&cdm->pos, sizeof(cdm->pos)); 1630 cdm->pos.position_type = 1631 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1632 1633 cdm->pos.cookie.bus = bus; 1634 cdm->pos.generations[CAM_BUS_GENERATION]= 1635 xsoftc.bus_generation; 1636 cdm->status = CAM_DEV_MATCH_MORE; 1637 return(0); 1638 } 1639 j = cdm->num_matches; 1640 cdm->num_matches++; 1641 cdm->matches[j].type = DEV_MATCH_BUS; 1642 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1643 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1644 cdm->matches[j].result.bus_result.unit_number = 1645 bus->sim->unit_number; 1646 strlcpy(cdm->matches[j].result.bus_result.dev_name, 1647 bus->sim->sim_name, 1648 sizeof(cdm->matches[j].result.bus_result.dev_name)); 1649 } 1650 1651 /* 1652 * If the user is only interested in buses, there's no 1653 * reason to descend to the next level in the tree. 1654 */ 1655 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1656 return(1); 1657 1658 /* 1659 * If there is a target generation recorded, check it to 1660 * make sure the target list hasn't changed. 1661 */ 1662 mtx_lock(&bus->eb_mtx); 1663 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1664 && (cdm->pos.cookie.bus == bus) 1665 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1666 && (cdm->pos.cookie.target != NULL)) { 1667 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1668 bus->generation)) { 1669 mtx_unlock(&bus->eb_mtx); 1670 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1671 return (0); 1672 } 1673 target = (struct cam_et *)cdm->pos.cookie.target; 1674 target->refcount++; 1675 } else 1676 target = NULL; 1677 mtx_unlock(&bus->eb_mtx); 1678 1679 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1680 } 1681 1682 static int 1683 xptedttargetfunc(struct cam_et *target, void *arg) 1684 { 1685 struct ccb_dev_match *cdm; 1686 struct cam_eb *bus; 1687 struct cam_ed *device; 1688 1689 cdm = (struct ccb_dev_match *)arg; 1690 bus = target->bus; 1691 1692 /* 1693 * If there is a device list generation recorded, check it to 1694 * make sure the device list hasn't changed. 
1695 */ 1696 mtx_lock(&bus->eb_mtx); 1697 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1698 && (cdm->pos.cookie.bus == bus) 1699 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1700 && (cdm->pos.cookie.target == target) 1701 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1702 && (cdm->pos.cookie.device != NULL)) { 1703 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1704 target->generation) { 1705 mtx_unlock(&bus->eb_mtx); 1706 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1707 return(0); 1708 } 1709 device = (struct cam_ed *)cdm->pos.cookie.device; 1710 device->refcount++; 1711 } else 1712 device = NULL; 1713 mtx_unlock(&bus->eb_mtx); 1714 1715 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1716 } 1717 1718 static int 1719 xptedtdevicefunc(struct cam_ed *device, void *arg) 1720 { 1721 struct cam_eb *bus; 1722 struct cam_periph *periph; 1723 struct ccb_dev_match *cdm; 1724 dev_match_ret retval; 1725 1726 cdm = (struct ccb_dev_match *)arg; 1727 bus = device->target->bus; 1728 1729 /* 1730 * If our position is for something deeper in the tree, that means 1731 * that we've already seen this node. So, we keep going down. 1732 */ 1733 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1734 && (cdm->pos.cookie.device == device) 1735 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1736 && (cdm->pos.cookie.periph != NULL)) 1737 retval = DM_RET_DESCEND; 1738 else 1739 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1740 device); 1741 1742 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1743 cdm->status = CAM_DEV_MATCH_ERROR; 1744 return(0); 1745 } 1746 1747 /* 1748 * If the copy flag is set, copy this device out. 1749 */ 1750 if (retval & DM_RET_COPY) { 1751 int spaceleft, j; 1752 1753 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1754 sizeof(struct dev_match_result)); 1755 1756 /* 1757 * If we don't have enough space to put in another 1758 * match result, save our position and tell the 1759 * user there are more devices to check. 
1760 */ 1761 if (spaceleft < sizeof(struct dev_match_result)) { 1762 bzero(&cdm->pos, sizeof(cdm->pos)); 1763 cdm->pos.position_type = 1764 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1765 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1766 1767 cdm->pos.cookie.bus = device->target->bus; 1768 cdm->pos.generations[CAM_BUS_GENERATION]= 1769 xsoftc.bus_generation; 1770 cdm->pos.cookie.target = device->target; 1771 cdm->pos.generations[CAM_TARGET_GENERATION] = 1772 device->target->bus->generation; 1773 cdm->pos.cookie.device = device; 1774 cdm->pos.generations[CAM_DEV_GENERATION] = 1775 device->target->generation; 1776 cdm->status = CAM_DEV_MATCH_MORE; 1777 return(0); 1778 } 1779 j = cdm->num_matches; 1780 cdm->num_matches++; 1781 cdm->matches[j].type = DEV_MATCH_DEVICE; 1782 cdm->matches[j].result.device_result.path_id = 1783 device->target->bus->path_id; 1784 cdm->matches[j].result.device_result.target_id = 1785 device->target->target_id; 1786 cdm->matches[j].result.device_result.target_lun = 1787 device->lun_id; 1788 cdm->matches[j].result.device_result.protocol = 1789 device->protocol; 1790 bcopy(&device->inq_data, 1791 &cdm->matches[j].result.device_result.inq_data, 1792 sizeof(struct scsi_inquiry_data)); 1793 bcopy(&device->ident_data, 1794 &cdm->matches[j].result.device_result.ident_data, 1795 sizeof(struct ata_params)); 1796 1797 /* Let the user know whether this device is unconfigured */ 1798 if (device->flags & CAM_DEV_UNCONFIGURED) 1799 cdm->matches[j].result.device_result.flags = 1800 DEV_RESULT_UNCONFIGURED; 1801 else 1802 cdm->matches[j].result.device_result.flags = 1803 DEV_RESULT_NOFLAG; 1804 } 1805 1806 /* 1807 * If the user isn't interested in peripherals, don't descend 1808 * the tree any further. 1809 */ 1810 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1811 return(1); 1812 1813 /* 1814 * If there is a peripheral list generation recorded, make sure 1815 * it hasn't changed. 1816 */ 1817 xpt_lock_buses(); 1818 mtx_lock(&bus->eb_mtx); 1819 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1820 && (cdm->pos.cookie.bus == bus) 1821 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1822 && (cdm->pos.cookie.target == device->target) 1823 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1824 && (cdm->pos.cookie.device == device) 1825 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1826 && (cdm->pos.cookie.periph != NULL)) { 1827 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1828 device->generation) { 1829 mtx_unlock(&bus->eb_mtx); 1830 xpt_unlock_buses(); 1831 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1832 return(0); 1833 } 1834 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1835 periph->refcount++; 1836 } else 1837 periph = NULL; 1838 mtx_unlock(&bus->eb_mtx); 1839 xpt_unlock_buses(); 1840 1841 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1842 } 1843 1844 static int 1845 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1846 { 1847 struct ccb_dev_match *cdm; 1848 dev_match_ret retval; 1849 1850 cdm = (struct ccb_dev_match *)arg; 1851 1852 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1853 1854 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1855 cdm->status = CAM_DEV_MATCH_ERROR; 1856 return(0); 1857 } 1858 1859 /* 1860 * If the copy flag is set, copy this peripheral out. 
1861 */ 1862 if (retval & DM_RET_COPY) { 1863 int spaceleft, j; 1864 size_t l; 1865 1866 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1867 sizeof(struct dev_match_result)); 1868 1869 /* 1870 * If we don't have enough space to put in another 1871 * match result, save our position and tell the 1872 * user there are more devices to check. 1873 */ 1874 if (spaceleft < sizeof(struct dev_match_result)) { 1875 bzero(&cdm->pos, sizeof(cdm->pos)); 1876 cdm->pos.position_type = 1877 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1878 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1879 CAM_DEV_POS_PERIPH; 1880 1881 cdm->pos.cookie.bus = periph->path->bus; 1882 cdm->pos.generations[CAM_BUS_GENERATION]= 1883 xsoftc.bus_generation; 1884 cdm->pos.cookie.target = periph->path->target; 1885 cdm->pos.generations[CAM_TARGET_GENERATION] = 1886 periph->path->bus->generation; 1887 cdm->pos.cookie.device = periph->path->device; 1888 cdm->pos.generations[CAM_DEV_GENERATION] = 1889 periph->path->target->generation; 1890 cdm->pos.cookie.periph = periph; 1891 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1892 periph->path->device->generation; 1893 cdm->status = CAM_DEV_MATCH_MORE; 1894 return(0); 1895 } 1896 1897 j = cdm->num_matches; 1898 cdm->num_matches++; 1899 cdm->matches[j].type = DEV_MATCH_PERIPH; 1900 cdm->matches[j].result.periph_result.path_id = 1901 periph->path->bus->path_id; 1902 cdm->matches[j].result.periph_result.target_id = 1903 periph->path->target->target_id; 1904 cdm->matches[j].result.periph_result.target_lun = 1905 periph->path->device->lun_id; 1906 cdm->matches[j].result.periph_result.unit_number = 1907 periph->unit_number; 1908 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 1909 strlcpy(cdm->matches[j].result.periph_result.periph_name, 1910 periph->periph_name, l); 1911 } 1912 1913 return(1); 1914 } 1915 1916 static int 1917 xptedtmatch(struct ccb_dev_match *cdm) 1918 { 1919 struct cam_eb *bus; 1920 int ret; 1921 1922 cdm->num_matches = 0; 1923 1924 /* 1925 * Check the bus list generation. If it has changed, the user 1926 * needs to reset everything and start over. 1927 */ 1928 xpt_lock_buses(); 1929 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1930 && (cdm->pos.cookie.bus != NULL)) { 1931 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1932 xsoftc.bus_generation) { 1933 xpt_unlock_buses(); 1934 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1935 return(0); 1936 } 1937 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1938 bus->refcount++; 1939 } else 1940 bus = NULL; 1941 xpt_unlock_buses(); 1942 1943 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1944 1945 /* 1946 * If we get back 0, that means that we had to stop before fully 1947 * traversing the EDT. It also means that one of the subroutines 1948 * has set the status field to the proper value. If we get back 1, 1949 * we've fully traversed the EDT and copied out any matching entries. 
1950 */ 1951 if (ret == 1) 1952 cdm->status = CAM_DEV_MATCH_LAST; 1953 1954 return(ret); 1955 } 1956 1957 static int 1958 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1959 { 1960 struct cam_periph *periph; 1961 struct ccb_dev_match *cdm; 1962 1963 cdm = (struct ccb_dev_match *)arg; 1964 1965 xpt_lock_buses(); 1966 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1967 && (cdm->pos.cookie.pdrv == pdrv) 1968 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1969 && (cdm->pos.cookie.periph != NULL)) { 1970 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1971 (*pdrv)->generation) { 1972 xpt_unlock_buses(); 1973 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1974 return(0); 1975 } 1976 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1977 periph->refcount++; 1978 } else 1979 periph = NULL; 1980 xpt_unlock_buses(); 1981 1982 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1983 } 1984 1985 static int 1986 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1987 { 1988 struct ccb_dev_match *cdm; 1989 dev_match_ret retval; 1990 1991 cdm = (struct ccb_dev_match *)arg; 1992 1993 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1994 1995 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1996 cdm->status = CAM_DEV_MATCH_ERROR; 1997 return(0); 1998 } 1999 2000 /* 2001 * If the copy flag is set, copy this peripheral out. 2002 */ 2003 if (retval & DM_RET_COPY) { 2004 int spaceleft, j; 2005 size_t l; 2006 2007 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2008 sizeof(struct dev_match_result)); 2009 2010 /* 2011 * If we don't have enough space to put in another 2012 * match result, save our position and tell the 2013 * user there are more devices to check. 2014 */ 2015 if (spaceleft < sizeof(struct dev_match_result)) { 2016 struct periph_driver **pdrv; 2017 2018 pdrv = NULL; 2019 bzero(&cdm->pos, sizeof(cdm->pos)); 2020 cdm->pos.position_type = 2021 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2022 CAM_DEV_POS_PERIPH; 2023 2024 /* 2025 * This may look a bit non-sensical, but it is 2026 * actually quite logical. There are very few 2027 * peripheral drivers, and bloating every peripheral 2028 * structure with a pointer back to its parent 2029 * peripheral driver linker set entry would cost 2030 * more in the long run than doing this quick lookup. 2031 */ 2032 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2033 if (strcmp((*pdrv)->driver_name, 2034 periph->periph_name) == 0) 2035 break; 2036 } 2037 2038 if (*pdrv == NULL) { 2039 cdm->status = CAM_DEV_MATCH_ERROR; 2040 return(0); 2041 } 2042 2043 cdm->pos.cookie.pdrv = pdrv; 2044 /* 2045 * The periph generation slot does double duty, as 2046 * does the periph pointer slot. They are used for 2047 * both edt and pdrv lookups and positioning. 2048 */ 2049 cdm->pos.cookie.periph = periph; 2050 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2051 (*pdrv)->generation; 2052 cdm->status = CAM_DEV_MATCH_MORE; 2053 return(0); 2054 } 2055 2056 j = cdm->num_matches; 2057 cdm->num_matches++; 2058 cdm->matches[j].type = DEV_MATCH_PERIPH; 2059 cdm->matches[j].result.periph_result.path_id = 2060 periph->path->bus->path_id; 2061 2062 /* 2063 * The transport layer peripheral doesn't have a target or 2064 * lun. 
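 * Report wildcard values in that case so that the copied-out match result
 * is still well formed.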
2065 */ 2066 if (periph->path->target) 2067 cdm->matches[j].result.periph_result.target_id = 2068 periph->path->target->target_id; 2069 else 2070 cdm->matches[j].result.periph_result.target_id = 2071 CAM_TARGET_WILDCARD; 2072 2073 if (periph->path->device) 2074 cdm->matches[j].result.periph_result.target_lun = 2075 periph->path->device->lun_id; 2076 else 2077 cdm->matches[j].result.periph_result.target_lun = 2078 CAM_LUN_WILDCARD; 2079 2080 cdm->matches[j].result.periph_result.unit_number = 2081 periph->unit_number; 2082 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 2083 strlcpy(cdm->matches[j].result.periph_result.periph_name, 2084 periph->periph_name, l); 2085 } 2086 2087 return(1); 2088 } 2089 2090 static int 2091 xptperiphlistmatch(struct ccb_dev_match *cdm) 2092 { 2093 int ret; 2094 2095 cdm->num_matches = 0; 2096 2097 /* 2098 * At this point in the edt traversal function, we check the bus 2099 * list generation to make sure that no buses have been added or 2100 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2101 * For the peripheral driver list traversal function, however, we 2102 * don't have to worry about new peripheral driver types coming or 2103 * going; they're in a linker set, and therefore can't change 2104 * without a recompile. 2105 */ 2106 2107 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2108 && (cdm->pos.cookie.pdrv != NULL)) 2109 ret = xptpdrvtraverse( 2110 (struct periph_driver **)cdm->pos.cookie.pdrv, 2111 xptplistpdrvfunc, cdm); 2112 else 2113 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2114 2115 /* 2116 * If we get back 0, that means that we had to stop before fully 2117 * traversing the peripheral driver tree. It also means that one of 2118 * the subroutines has set the status field to the proper value. If 2119 * we get back 1, we've fully traversed the EDT and copied out any 2120 * matching entries. 
2121 */ 2122 if (ret == 1) 2123 cdm->status = CAM_DEV_MATCH_LAST; 2124 2125 return(ret); 2126 } 2127 2128 static int 2129 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2130 { 2131 struct cam_eb *bus, *next_bus; 2132 int retval; 2133 2134 retval = 1; 2135 if (start_bus) 2136 bus = start_bus; 2137 else { 2138 xpt_lock_buses(); 2139 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2140 if (bus == NULL) { 2141 xpt_unlock_buses(); 2142 return (retval); 2143 } 2144 bus->refcount++; 2145 xpt_unlock_buses(); 2146 } 2147 for (; bus != NULL; bus = next_bus) { 2148 retval = tr_func(bus, arg); 2149 if (retval == 0) { 2150 xpt_release_bus(bus); 2151 break; 2152 } 2153 xpt_lock_buses(); 2154 next_bus = TAILQ_NEXT(bus, links); 2155 if (next_bus) 2156 next_bus->refcount++; 2157 xpt_unlock_buses(); 2158 xpt_release_bus(bus); 2159 } 2160 return(retval); 2161 } 2162 2163 static int 2164 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2165 xpt_targetfunc_t *tr_func, void *arg) 2166 { 2167 struct cam_et *target, *next_target; 2168 int retval; 2169 2170 retval = 1; 2171 if (start_target) 2172 target = start_target; 2173 else { 2174 mtx_lock(&bus->eb_mtx); 2175 target = TAILQ_FIRST(&bus->et_entries); 2176 if (target == NULL) { 2177 mtx_unlock(&bus->eb_mtx); 2178 return (retval); 2179 } 2180 target->refcount++; 2181 mtx_unlock(&bus->eb_mtx); 2182 } 2183 for (; target != NULL; target = next_target) { 2184 retval = tr_func(target, arg); 2185 if (retval == 0) { 2186 xpt_release_target(target); 2187 break; 2188 } 2189 mtx_lock(&bus->eb_mtx); 2190 next_target = TAILQ_NEXT(target, links); 2191 if (next_target) 2192 next_target->refcount++; 2193 mtx_unlock(&bus->eb_mtx); 2194 xpt_release_target(target); 2195 } 2196 return(retval); 2197 } 2198 2199 static int 2200 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2201 xpt_devicefunc_t *tr_func, void *arg) 2202 { 2203 struct cam_eb *bus; 2204 struct cam_ed *device, *next_device; 2205 int retval; 2206 2207 retval = 1; 2208 bus = target->bus; 2209 if (start_device) 2210 device = start_device; 2211 else { 2212 mtx_lock(&bus->eb_mtx); 2213 device = TAILQ_FIRST(&target->ed_entries); 2214 if (device == NULL) { 2215 mtx_unlock(&bus->eb_mtx); 2216 return (retval); 2217 } 2218 device->refcount++; 2219 mtx_unlock(&bus->eb_mtx); 2220 } 2221 for (; device != NULL; device = next_device) { 2222 mtx_lock(&device->device_mtx); 2223 retval = tr_func(device, arg); 2224 mtx_unlock(&device->device_mtx); 2225 if (retval == 0) { 2226 xpt_release_device(device); 2227 break; 2228 } 2229 mtx_lock(&bus->eb_mtx); 2230 next_device = TAILQ_NEXT(device, links); 2231 if (next_device) 2232 next_device->refcount++; 2233 mtx_unlock(&bus->eb_mtx); 2234 xpt_release_device(device); 2235 } 2236 return(retval); 2237 } 2238 2239 static int 2240 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2241 xpt_periphfunc_t *tr_func, void *arg) 2242 { 2243 struct cam_eb *bus; 2244 struct cam_periph *periph, *next_periph; 2245 int retval; 2246 2247 retval = 1; 2248 2249 bus = device->target->bus; 2250 if (start_periph) 2251 periph = start_periph; 2252 else { 2253 xpt_lock_buses(); 2254 mtx_lock(&bus->eb_mtx); 2255 periph = SLIST_FIRST(&device->periphs); 2256 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2257 periph = SLIST_NEXT(periph, periph_links); 2258 if (periph == NULL) { 2259 mtx_unlock(&bus->eb_mtx); 2260 xpt_unlock_buses(); 2261 return (retval); 2262 } 2263 periph->refcount++; 2264 mtx_unlock(&bus->eb_mtx); 2265 
xpt_unlock_buses(); 2266 } 2267 for (; periph != NULL; periph = next_periph) { 2268 retval = tr_func(periph, arg); 2269 if (retval == 0) { 2270 cam_periph_release_locked(periph); 2271 break; 2272 } 2273 xpt_lock_buses(); 2274 mtx_lock(&bus->eb_mtx); 2275 next_periph = SLIST_NEXT(periph, periph_links); 2276 while (next_periph != NULL && 2277 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2278 next_periph = SLIST_NEXT(next_periph, periph_links); 2279 if (next_periph) 2280 next_periph->refcount++; 2281 mtx_unlock(&bus->eb_mtx); 2282 xpt_unlock_buses(); 2283 cam_periph_release_locked(periph); 2284 } 2285 return(retval); 2286 } 2287 2288 static int 2289 xptpdrvtraverse(struct periph_driver **start_pdrv, 2290 xpt_pdrvfunc_t *tr_func, void *arg) 2291 { 2292 struct periph_driver **pdrv; 2293 int retval; 2294 2295 retval = 1; 2296 2297 /* 2298 * We don't traverse the peripheral driver list like we do the 2299 * other lists, because it is a linker set, and therefore cannot be 2300 * changed during runtime. If the peripheral driver list is ever 2301 * re-done to be something other than a linker set (i.e. it can 2302 * change while the system is running), the list traversal should 2303 * be modified to work like the other traversal functions. 2304 */ 2305 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2306 *pdrv != NULL; pdrv++) { 2307 retval = tr_func(pdrv, arg); 2308 2309 if (retval == 0) 2310 return(retval); 2311 } 2312 2313 return(retval); 2314 } 2315 2316 static int 2317 xptpdperiphtraverse(struct periph_driver **pdrv, 2318 struct cam_periph *start_periph, 2319 xpt_periphfunc_t *tr_func, void *arg) 2320 { 2321 struct cam_periph *periph, *next_periph; 2322 int retval; 2323 2324 retval = 1; 2325 2326 if (start_periph) 2327 periph = start_periph; 2328 else { 2329 xpt_lock_buses(); 2330 periph = TAILQ_FIRST(&(*pdrv)->units); 2331 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2332 periph = TAILQ_NEXT(periph, unit_links); 2333 if (periph == NULL) { 2334 xpt_unlock_buses(); 2335 return (retval); 2336 } 2337 periph->refcount++; 2338 xpt_unlock_buses(); 2339 } 2340 for (; periph != NULL; periph = next_periph) { 2341 cam_periph_lock(periph); 2342 retval = tr_func(periph, arg); 2343 cam_periph_unlock(periph); 2344 if (retval == 0) { 2345 cam_periph_release(periph); 2346 break; 2347 } 2348 xpt_lock_buses(); 2349 next_periph = TAILQ_NEXT(periph, unit_links); 2350 while (next_periph != NULL && 2351 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2352 next_periph = TAILQ_NEXT(next_periph, unit_links); 2353 if (next_periph) 2354 next_periph->refcount++; 2355 xpt_unlock_buses(); 2356 cam_periph_release(periph); 2357 } 2358 return(retval); 2359 } 2360 2361 static int 2362 xptdefbusfunc(struct cam_eb *bus, void *arg) 2363 { 2364 struct xpt_traverse_config *tr_config; 2365 2366 tr_config = (struct xpt_traverse_config *)arg; 2367 2368 if (tr_config->depth == XPT_DEPTH_BUS) { 2369 xpt_busfunc_t *tr_func; 2370 2371 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2372 2373 return(tr_func(bus, tr_config->tr_arg)); 2374 } else 2375 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2376 } 2377 2378 static int 2379 xptdeftargetfunc(struct cam_et *target, void *arg) 2380 { 2381 struct xpt_traverse_config *tr_config; 2382 2383 tr_config = (struct xpt_traverse_config *)arg; 2384 2385 if (tr_config->depth == XPT_DEPTH_TARGET) { 2386 xpt_targetfunc_t *tr_func; 2387 2388 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2389 2390 return(tr_func(target, tr_config->tr_arg)); 2391 } else 2392 
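/* Not at the requested depth; descend to the device level. */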
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2393 } 2394 2395 static int 2396 xptdefdevicefunc(struct cam_ed *device, void *arg) 2397 { 2398 struct xpt_traverse_config *tr_config; 2399 2400 tr_config = (struct xpt_traverse_config *)arg; 2401 2402 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2403 xpt_devicefunc_t *tr_func; 2404 2405 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2406 2407 return(tr_func(device, tr_config->tr_arg)); 2408 } else 2409 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2410 } 2411 2412 static int 2413 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2414 { 2415 struct xpt_traverse_config *tr_config; 2416 xpt_periphfunc_t *tr_func; 2417 2418 tr_config = (struct xpt_traverse_config *)arg; 2419 2420 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2421 2422 /* 2423 * Unlike the other default functions, we don't check for depth 2424 * here. The peripheral driver level is the last level in the EDT, 2425 * so if we're here, we should execute the function in question. 2426 */ 2427 return(tr_func(periph, tr_config->tr_arg)); 2428 } 2429 2430 /* 2431 * Execute the given function for every bus in the EDT. 2432 */ 2433 static int 2434 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2435 { 2436 struct xpt_traverse_config tr_config; 2437 2438 tr_config.depth = XPT_DEPTH_BUS; 2439 tr_config.tr_func = tr_func; 2440 tr_config.tr_arg = arg; 2441 2442 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2443 } 2444 2445 /* 2446 * Execute the given function for every device in the EDT. 2447 */ 2448 static int 2449 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2450 { 2451 struct xpt_traverse_config tr_config; 2452 2453 tr_config.depth = XPT_DEPTH_DEVICE; 2454 tr_config.tr_func = tr_func; 2455 tr_config.tr_arg = arg; 2456 2457 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2458 } 2459 2460 static int 2461 xptsetasyncfunc(struct cam_ed *device, void *arg) 2462 { 2463 struct cam_path path; 2464 struct ccb_getdev cgd; 2465 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2466 2467 /* 2468 * Don't report unconfigured devices (Wildcard devs, 2469 * devices only for target mode, device instances 2470 * that have been invalidated but are waiting for 2471 * their last reference count to be released). 2472 */ 2473 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2474 return (1); 2475 2476 xpt_compile_path(&path, 2477 NULL, 2478 device->target->bus->path_id, 2479 device->target->target_id, 2480 device->lun_id); 2481 xpt_gdev_type(&cgd, &path); 2482 csa->callback(csa->callback_arg, 2483 AC_FOUND_DEVICE, 2484 &path, &cgd); 2485 xpt_release_path(&path); 2486 2487 return(1); 2488 } 2489 2490 static int 2491 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2492 { 2493 struct cam_path path; 2494 struct ccb_pathinq cpi; 2495 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2496 2497 xpt_compile_path(&path, /*periph*/NULL, 2498 bus->path_id, 2499 CAM_TARGET_WILDCARD, 2500 CAM_LUN_WILDCARD); 2501 xpt_path_lock(&path); 2502 xpt_path_inq(&cpi, &path); 2503 csa->callback(csa->callback_arg, 2504 AC_PATH_REGISTERED, 2505 &path, &cpi); 2506 xpt_path_unlock(&path); 2507 xpt_release_path(&path); 2508 2509 return(1); 2510 } 2511 2512 void 2513 xpt_action(union ccb *start_ccb) 2514 { 2515 2516 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2517 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2518 xpt_action_name(start_ccb->ccb_h.func_code))); 2519 2520 /* 2521 * Either it isn't queued, or it has a real priority. 
There are still too 2522 * many places that reuse CCBs with a real priority to do immediate 2523 * queries to do the other side of this assert. 2524 */ 2525 KASSERT((start_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || 2526 start_ccb->ccb_h.pinfo.priority != CAM_PRIORITY_NONE, 2527 ("%s: queued ccb and CAM_PRIORITY_NONE illegal.", __func__)); 2528 2529 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2530 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2531 } 2532 2533 void 2534 xpt_action_default(union ccb *start_ccb) 2535 { 2536 struct cam_path *path; 2537 struct cam_sim *sim; 2538 struct mtx *mtx; 2539 2540 path = start_ccb->ccb_h.path; 2541 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2542 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2543 xpt_action_name(start_ccb->ccb_h.func_code))); 2544 2545 switch (start_ccb->ccb_h.func_code) { 2546 case XPT_SCSI_IO: 2547 { 2548 struct cam_ed *device; 2549 2550 /* 2551 * For the sake of compatibility with SCSI-1 2552 * devices that may not understand the identify 2553 * message, we include lun information in the 2554 * second byte of all commands. SCSI-1 specifies 2555 * that luns are a 3 bit value and reserves only 3 2556 * bits for lun information in the CDB. Later 2557 * revisions of the SCSI spec allow for more than 8 2558 * luns, but have deprecated lun information in the 2559 * CDB. So, if the lun won't fit, we must omit it. 2560 * 2561 * Also be aware that during initial probing for devices, 2562 * the inquiry information is unknown but initialized to 0. 2563 * This means that this code will be exercised while probing 2564 * devices with an ANSI revision greater than 2. 2565 */ 2566 device = path->device; 2567 if (device->protocol_version <= SCSI_REV_2 2568 && start_ccb->ccb_h.target_lun < 8 2569 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2570 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2571 start_ccb->ccb_h.target_lun << 5; 2572 } 2573 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2574 } 2575 /* FALLTHROUGH */ 2576 case XPT_TARGET_IO: 2577 case XPT_CONT_TARGET_IO: 2578 start_ccb->csio.sense_resid = 0; 2579 start_ccb->csio.resid = 0; 2580 /* FALLTHROUGH */ 2581 case XPT_ATA_IO: 2582 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2583 start_ccb->ataio.resid = 0; 2584 /* FALLTHROUGH */ 2585 case XPT_NVME_IO: 2586 case XPT_NVME_ADMIN: 2587 case XPT_MMC_IO: 2588 case XPT_MMC_GET_TRAN_SETTINGS: 2589 case XPT_MMC_SET_TRAN_SETTINGS: 2590 case XPT_RESET_DEV: 2591 case XPT_ENG_EXEC: 2592 case XPT_SMP_IO: 2593 { 2594 struct cam_devq *devq; 2595 2596 devq = path->bus->sim->devq; 2597 mtx_lock(&devq->send_mtx); 2598 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2599 if (xpt_schedule_devq(devq, path->device) != 0) 2600 xpt_run_devq(devq); 2601 mtx_unlock(&devq->send_mtx); 2602 break; 2603 } 2604 case XPT_CALC_GEOMETRY: 2605 /* Filter out garbage */ 2606 if (start_ccb->ccg.block_size == 0 2607 || start_ccb->ccg.volume_size == 0) { 2608 start_ccb->ccg.cylinders = 0; 2609 start_ccb->ccg.heads = 0; 2610 start_ccb->ccg.secs_per_track = 0; 2611 start_ccb->ccb_h.status = CAM_REQ_CMP; 2612 break; 2613 } 2614 goto call_sim; 2615 case XPT_ABORT: 2616 { 2617 union ccb* abort_ccb; 2618 2619 abort_ccb = start_ccb->cab.abort_ccb; 2620 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2621 struct cam_ed *device; 2622 struct cam_devq *devq; 2623 2624 device = abort_ccb->ccb_h.path->device; 2625 devq = device->sim->devq; 2626 2627 mtx_lock(&devq->send_mtx); 2628 if (abort_ccb->ccb_h.pinfo.index > 0) { 2629 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2630
abort_ccb->ccb_h.status = 2631 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2632 xpt_freeze_devq_device(device, 1); 2633 mtx_unlock(&devq->send_mtx); 2634 xpt_done(abort_ccb); 2635 start_ccb->ccb_h.status = CAM_REQ_CMP; 2636 break; 2637 } 2638 mtx_unlock(&devq->send_mtx); 2639 2640 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2641 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2642 /* 2643 * We've caught this ccb en route to 2644 * the SIM. Flag it for abort and the 2645 * SIM will do so just before starting 2646 * real work on the CCB. 2647 */ 2648 abort_ccb->ccb_h.status = 2649 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2650 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2651 start_ccb->ccb_h.status = CAM_REQ_CMP; 2652 break; 2653 } 2654 } 2655 if (XPT_FC_IS_QUEUED(abort_ccb) 2656 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2657 /* 2658 * It's already completed but waiting 2659 * for our SWI to get to it. 2660 */ 2661 start_ccb->ccb_h.status = CAM_UA_ABORT; 2662 break; 2663 } 2664 /* 2665 * If we weren't able to take care of the abort request 2666 * in the XPT, pass the request down to the SIM for processing. 2667 */ 2668 } 2669 /* FALLTHROUGH */ 2670 case XPT_ACCEPT_TARGET_IO: 2671 case XPT_EN_LUN: 2672 case XPT_IMMED_NOTIFY: 2673 case XPT_NOTIFY_ACK: 2674 case XPT_RESET_BUS: 2675 case XPT_IMMEDIATE_NOTIFY: 2676 case XPT_NOTIFY_ACKNOWLEDGE: 2677 case XPT_GET_SIM_KNOB_OLD: 2678 case XPT_GET_SIM_KNOB: 2679 case XPT_SET_SIM_KNOB: 2680 case XPT_GET_TRAN_SETTINGS: 2681 case XPT_SET_TRAN_SETTINGS: 2682 case XPT_PATH_INQ: 2683 call_sim: 2684 sim = path->bus->sim; 2685 mtx = sim->mtx; 2686 if (mtx && !mtx_owned(mtx)) 2687 mtx_lock(mtx); 2688 else 2689 mtx = NULL; 2690 2691 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2692 ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); 2693 (*(sim->sim_action))(sim, start_ccb); 2694 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2695 ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); 2696 if (mtx) 2697 mtx_unlock(mtx); 2698 break; 2699 case XPT_PATH_STATS: 2700 start_ccb->cpis.last_reset = path->bus->last_reset; 2701 start_ccb->ccb_h.status = CAM_REQ_CMP; 2702 break; 2703 case XPT_GDEV_TYPE: 2704 { 2705 struct cam_ed *dev; 2706 2707 dev = path->device; 2708 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2709 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2710 } else { 2711 struct ccb_getdev *cgd; 2712 2713 cgd = &start_ccb->cgd; 2714 cgd->protocol = dev->protocol; 2715 cgd->inq_data = dev->inq_data; 2716 cgd->ident_data = dev->ident_data; 2717 cgd->inq_flags = dev->inq_flags; 2718 cgd->ccb_h.status = CAM_REQ_CMP; 2719 cgd->serial_num_len = dev->serial_num_len; 2720 if ((dev->serial_num_len > 0) 2721 && (dev->serial_num != NULL)) 2722 bcopy(dev->serial_num, cgd->serial_num, 2723 dev->serial_num_len); 2724 } 2725 break; 2726 } 2727 case XPT_GDEV_STATS: 2728 { 2729 struct ccb_getdevstats *cgds = &start_ccb->cgds; 2730 struct cam_ed *dev = path->device; 2731 struct cam_eb *bus = path->bus; 2732 struct cam_et *tar = path->target; 2733 struct cam_devq *devq = bus->sim->devq; 2734 2735 mtx_lock(&devq->send_mtx); 2736 cgds->dev_openings = dev->ccbq.dev_openings; 2737 cgds->dev_active = dev->ccbq.dev_active; 2738 cgds->allocated = dev->ccbq.allocated; 2739 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2740 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; 2741 cgds->last_reset = tar->last_reset; 2742 cgds->maxtags = dev->maxtags; 2743 cgds->mintags = dev->mintags; 2744 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2745 
cgds->last_reset = bus->last_reset; 2746 mtx_unlock(&devq->send_mtx); 2747 cgds->ccb_h.status = CAM_REQ_CMP; 2748 break; 2749 } 2750 case XPT_GDEVLIST: 2751 { 2752 struct cam_periph *nperiph; 2753 struct periph_list *periph_head; 2754 struct ccb_getdevlist *cgdl; 2755 u_int i; 2756 struct cam_ed *device; 2757 bool found; 2758 2759 found = false; 2760 2761 /* 2762 * Don't want anyone mucking with our data. 2763 */ 2764 device = path->device; 2765 periph_head = &device->periphs; 2766 cgdl = &start_ccb->cgdl; 2767 2768 /* 2769 * Check and see if the list has changed since the user 2770 * last requested a list member. If so, tell them that the 2771 * list has changed, and therefore they need to start over 2772 * from the beginning. 2773 */ 2774 if ((cgdl->index != 0) && 2775 (cgdl->generation != device->generation)) { 2776 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2777 break; 2778 } 2779 2780 /* 2781 * Traverse the list of peripherals and attempt to find 2782 * the requested peripheral. 2783 */ 2784 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2785 (nperiph != NULL) && (i <= cgdl->index); 2786 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2787 if (i == cgdl->index) { 2788 strlcpy(cgdl->periph_name, 2789 nperiph->periph_name, 2790 sizeof(cgdl->periph_name)); 2791 cgdl->unit_number = nperiph->unit_number; 2792 found = true; 2793 } 2794 } 2795 if (!found) { 2796 cgdl->status = CAM_GDEVLIST_ERROR; 2797 break; 2798 } 2799 2800 if (nperiph == NULL) 2801 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2802 else 2803 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2804 2805 cgdl->index++; 2806 cgdl->generation = device->generation; 2807 2808 cgdl->ccb_h.status = CAM_REQ_CMP; 2809 break; 2810 } 2811 case XPT_DEV_MATCH: 2812 { 2813 dev_pos_type position_type; 2814 struct ccb_dev_match *cdm; 2815 2816 cdm = &start_ccb->cdm; 2817 2818 /* 2819 * There are two ways of getting at information in the EDT. 2820 * The first way is via the primary EDT tree. It starts 2821 * with a list of buses, then a list of targets on a bus, 2822 * then devices/luns on a target, and then peripherals on a 2823 * device/lun. The "other" way is by the peripheral driver 2824 * lists. The peripheral driver lists are organized by 2825 * peripheral driver. (obviously) So it makes sense to 2826 * use the peripheral driver list if the user is looking 2827 * for something like "da1", or all "da" devices. If the 2828 * user is looking for something on a particular bus/target 2829 * or lun, it's generally better to go through the EDT tree. 
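 *
 * For example (an illustrative sketch only, not code from this file), a
 * userland consumer such as camcontrol(8) that wants every "da" peripheral
 * would fill in a peripheral pattern along these lines:
 *
 *	struct dev_match_pattern p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.type = DEV_MATCH_PERIPH;
 *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
 *	    sizeof(p.pattern.periph_pattern.periph_name));
 *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *
 * which steers the lookup through the peripheral driver lists, while a
 * pattern keyed on a specific path_id/target_id/lun_id is better served by
 * the EDT walk.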
2830 */ 2831 2832 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2833 position_type = cdm->pos.position_type; 2834 else { 2835 u_int i; 2836 2837 position_type = CAM_DEV_POS_NONE; 2838 2839 for (i = 0; i < cdm->num_patterns; i++) { 2840 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2841 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2842 position_type = CAM_DEV_POS_EDT; 2843 break; 2844 } 2845 } 2846 2847 if (cdm->num_patterns == 0) 2848 position_type = CAM_DEV_POS_EDT; 2849 else if (position_type == CAM_DEV_POS_NONE) 2850 position_type = CAM_DEV_POS_PDRV; 2851 } 2852 2853 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2854 case CAM_DEV_POS_EDT: 2855 xptedtmatch(cdm); 2856 break; 2857 case CAM_DEV_POS_PDRV: 2858 xptperiphlistmatch(cdm); 2859 break; 2860 default: 2861 cdm->status = CAM_DEV_MATCH_ERROR; 2862 break; 2863 } 2864 2865 if (cdm->status == CAM_DEV_MATCH_ERROR) 2866 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2867 else 2868 start_ccb->ccb_h.status = CAM_REQ_CMP; 2869 2870 break; 2871 } 2872 case XPT_SASYNC_CB: 2873 { 2874 struct ccb_setasync *csa; 2875 struct async_node *cur_entry; 2876 struct async_list *async_head; 2877 uint32_t added; 2878 2879 csa = &start_ccb->csa; 2880 added = csa->event_enable; 2881 async_head = &path->device->asyncs; 2882 2883 /* 2884 * If there is already an entry for us, simply 2885 * update it. 2886 */ 2887 cur_entry = SLIST_FIRST(async_head); 2888 while (cur_entry != NULL) { 2889 if ((cur_entry->callback_arg == csa->callback_arg) 2890 && (cur_entry->callback == csa->callback)) 2891 break; 2892 cur_entry = SLIST_NEXT(cur_entry, links); 2893 } 2894 2895 if (cur_entry != NULL) { 2896 /* 2897 * If the request has no flags set, 2898 * remove the entry. 2899 */ 2900 added &= ~cur_entry->event_enable; 2901 if (csa->event_enable == 0) { 2902 SLIST_REMOVE(async_head, cur_entry, 2903 async_node, links); 2904 xpt_release_device(path->device); 2905 free(cur_entry, M_CAMXPT); 2906 } else { 2907 cur_entry->event_enable = csa->event_enable; 2908 } 2909 csa->event_enable = added; 2910 } else { 2911 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2912 M_NOWAIT); 2913 if (cur_entry == NULL) { 2914 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2915 break; 2916 } 2917 cur_entry->event_enable = csa->event_enable; 2918 cur_entry->event_lock = (path->bus->sim->mtx && 2919 mtx_owned(path->bus->sim->mtx)) ? 1 : 0; 2920 cur_entry->callback_arg = csa->callback_arg; 2921 cur_entry->callback = csa->callback; 2922 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2923 xpt_acquire_device(path->device); 2924 } 2925 start_ccb->ccb_h.status = CAM_REQ_CMP; 2926 break; 2927 } 2928 case XPT_REL_SIMQ: 2929 { 2930 struct ccb_relsim *crs; 2931 struct cam_ed *dev; 2932 2933 crs = &start_ccb->crs; 2934 dev = path->device; 2935 if (dev == NULL) { 2936 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2937 break; 2938 } 2939 2940 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2941 /* Don't ever go below one opening */ 2942 if (crs->openings > 0) { 2943 xpt_dev_ccbq_resize(path, crs->openings); 2944 if (bootverbose) { 2945 xpt_print(path, 2946 "number of openings is now %d\n", 2947 crs->openings); 2948 } 2949 } 2950 } 2951 2952 mtx_lock(&dev->sim->devq->send_mtx); 2953 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2954 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2955 /* 2956 * Just extend the old timeout and decrement 2957 * the freeze count so that a single timeout 2958 * is sufficient for releasing the queue. 
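 * The CAM_DEV_QFREEZE manipulation that follows controls whether this
 * handler itself drops a freeze count on the way out (see the
 * xpt_release_devq() call at the end of this case).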
2959 */ 2960 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2961 callout_stop(&dev->callout); 2962 } else { 2963 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2964 } 2965 2966 callout_reset_sbt(&dev->callout, 2967 SBT_1MS * crs->release_timeout, SBT_1MS, 2968 xpt_release_devq_timeout, dev, 0); 2969 2970 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2971 } 2972 2973 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2974 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2975 /* 2976 * Decrement the freeze count so that a single 2977 * completion is still sufficient to unfreeze 2978 * the queue. 2979 */ 2980 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2981 } else { 2982 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2983 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2984 } 2985 } 2986 2987 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2988 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2989 || (dev->ccbq.dev_active == 0)) { 2990 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2991 } else { 2992 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2993 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2994 } 2995 } 2996 mtx_unlock(&dev->sim->devq->send_mtx); 2997 2998 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2999 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 3000 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 3001 start_ccb->ccb_h.status = CAM_REQ_CMP; 3002 break; 3003 } 3004 case XPT_DEBUG: { 3005 struct cam_path *oldpath; 3006 3007 /* Check that all request bits are supported. */ 3008 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3009 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3010 break; 3011 } 3012 3013 cam_dflags = CAM_DEBUG_NONE; 3014 if (cam_dpath != NULL) { 3015 oldpath = cam_dpath; 3016 cam_dpath = NULL; 3017 xpt_free_path(oldpath); 3018 } 3019 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3020 if (xpt_create_path(&cam_dpath, NULL, 3021 start_ccb->ccb_h.path_id, 3022 start_ccb->ccb_h.target_id, 3023 start_ccb->ccb_h.target_lun) != 3024 CAM_REQ_CMP) { 3025 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3026 } else { 3027 cam_dflags = start_ccb->cdbg.flags; 3028 start_ccb->ccb_h.status = CAM_REQ_CMP; 3029 xpt_print(cam_dpath, "debugging flags now %x\n", 3030 cam_dflags); 3031 } 3032 } else 3033 start_ccb->ccb_h.status = CAM_REQ_CMP; 3034 break; 3035 } 3036 case XPT_NOOP: 3037 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3038 xpt_freeze_devq(path, 1); 3039 start_ccb->ccb_h.status = CAM_REQ_CMP; 3040 break; 3041 case XPT_REPROBE_LUN: 3042 xpt_async(AC_INQ_CHANGED, path, NULL); 3043 start_ccb->ccb_h.status = CAM_REQ_CMP; 3044 xpt_done(start_ccb); 3045 break; 3046 case XPT_ASYNC: 3047 /* 3048 * Queue the async operation so it can be run from a sleepable 3049 * context. 
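 * The CCB is picked up off cam_async.cam_doneq by CAM's async worker
 * thread, which invokes the registered callbacks in a context where
 * sleeping is permitted.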
3050 */ 3051 start_ccb->ccb_h.status = CAM_REQ_CMP; 3052 mtx_lock(&cam_async.cam_doneq_mtx); 3053 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe); 3054 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX; 3055 mtx_unlock(&cam_async.cam_doneq_mtx); 3056 wakeup(&cam_async.cam_doneq); 3057 break; 3058 default: 3059 case XPT_SDEV_TYPE: 3060 case XPT_TERM_IO: 3061 case XPT_ENG_INQ: 3062 /* XXX Implement */ 3063 xpt_print(start_ccb->ccb_h.path, 3064 "%s: CCB type %#x %s not supported\n", __func__, 3065 start_ccb->ccb_h.func_code, 3066 xpt_action_name(start_ccb->ccb_h.func_code)); 3067 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3068 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3069 xpt_done(start_ccb); 3070 } 3071 break; 3072 } 3073 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3074 ("xpt_action_default: func= %#x %s status %#x\n", 3075 start_ccb->ccb_h.func_code, 3076 xpt_action_name(start_ccb->ccb_h.func_code), 3077 start_ccb->ccb_h.status)); 3078 } 3079 3080 /* 3081 * Call the sim poll routine to allow the sim to complete 3082 * any inflight requests, then call camisr_runqueue to 3083 * complete any CCB that the polling completed. 3084 */ 3085 void 3086 xpt_sim_poll(struct cam_sim *sim) 3087 { 3088 struct mtx *mtx; 3089 3090 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3091 mtx = sim->mtx; 3092 if (mtx) 3093 mtx_lock(mtx); 3094 (*(sim->sim_poll))(sim); 3095 if (mtx) 3096 mtx_unlock(mtx); 3097 camisr_runqueue(); 3098 } 3099 3100 uint32_t 3101 xpt_poll_setup(union ccb *start_ccb) 3102 { 3103 uint32_t timeout; 3104 struct cam_sim *sim; 3105 struct cam_devq *devq; 3106 struct cam_ed *dev; 3107 3108 timeout = start_ccb->ccb_h.timeout * 10; 3109 sim = start_ccb->ccb_h.path->bus->sim; 3110 devq = sim->devq; 3111 dev = start_ccb->ccb_h.path->device; 3112 3113 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3114 3115 /* 3116 * Steal an opening so that no other queued requests 3117 * can get it before us while we simulate interrupts. 3118 */ 3119 mtx_lock(&devq->send_mtx); 3120 dev->ccbq.dev_openings--; 3121 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3122 (--timeout > 0)) { 3123 mtx_unlock(&devq->send_mtx); 3124 DELAY(100); 3125 xpt_sim_poll(sim); 3126 mtx_lock(&devq->send_mtx); 3127 } 3128 dev->ccbq.dev_openings++; 3129 mtx_unlock(&devq->send_mtx); 3130 3131 return (timeout); 3132 } 3133 3134 void 3135 xpt_pollwait(union ccb *start_ccb, uint32_t timeout) 3136 { 3137 3138 KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim), 3139 ("%s: non-pollable sim", __func__)); 3140 while (--timeout > 0) { 3141 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); 3142 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3143 != CAM_REQ_INPROG) 3144 break; 3145 DELAY(100); 3146 } 3147 3148 if (timeout == 0) { 3149 /* 3150 * XXX Is it worth adding a sim_timeout entry 3151 * point so we can attempt recovery? If 3152 * this is only used for dumps, I don't think 3153 * it is. 3154 */ 3155 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3156 } 3157 } 3158 3159 /* 3160 * Schedule a peripheral driver to receive a ccb when its 3161 * target device has space for more transactions. 
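 *
 * A peripheral driver typically calls this from its I/O queueing path once
 * it has work pending.  An illustrative sketch (not code from this file;
 * "softc" and "bp" are hypothetical locals) might look like:
 *
 *	cam_periph_lock(periph);
 *	bioq_disksort(&softc->bio_queue, bp);
 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
 *	cam_periph_unlock(periph);
 *
 * Once the device has an opening, the driver's periph_start() method is
 * handed a CCB at the scheduled priority (see xpt_run_allocq() below).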
3162 */ 3163 void 3164 xpt_schedule(struct cam_periph *periph, uint32_t new_priority) 3165 { 3166 3167 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3168 cam_periph_assert(periph, MA_OWNED); 3169 if (new_priority < periph->scheduled_priority) { 3170 periph->scheduled_priority = new_priority; 3171 xpt_run_allocq(periph, 0); 3172 } 3173 } 3174 3175 /* 3176 * Schedule a device to run on a given queue. 3177 * If the device was inserted as a new entry on the queue, 3178 * return 1 meaning the device queue should be run. If we 3179 * were already queued, implying someone else has already 3180 * started the queue, return 0 so the caller doesn't attempt 3181 * to run the queue. 3182 */ 3183 static int 3184 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3185 uint32_t new_priority) 3186 { 3187 int retval; 3188 uint32_t old_priority; 3189 3190 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3191 3192 old_priority = pinfo->priority; 3193 3194 /* 3195 * Are we already queued? 3196 */ 3197 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3198 /* Simply reorder based on new priority */ 3199 if (new_priority < old_priority) { 3200 camq_change_priority(queue, pinfo->index, 3201 new_priority); 3202 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3203 ("changed priority to %d\n", 3204 new_priority)); 3205 retval = 1; 3206 } else 3207 retval = 0; 3208 } else { 3209 /* New entry on the queue */ 3210 if (new_priority < old_priority) 3211 pinfo->priority = new_priority; 3212 3213 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3214 ("Inserting onto queue\n")); 3215 pinfo->generation = ++queue->generation; 3216 camq_insert(queue, pinfo); 3217 retval = 1; 3218 } 3219 return (retval); 3220 } 3221 3222 static void 3223 xpt_run_allocq_task(void *context, int pending) 3224 { 3225 struct cam_periph *periph = context; 3226 3227 cam_periph_lock(periph); 3228 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3229 xpt_run_allocq(periph, 1); 3230 cam_periph_unlock(periph); 3231 cam_periph_release(periph); 3232 } 3233 3234 static void 3235 xpt_run_allocq(struct cam_periph *periph, int sleep) 3236 { 3237 struct cam_ed *device; 3238 union ccb *ccb; 3239 uint32_t prio; 3240 3241 cam_periph_assert(periph, MA_OWNED); 3242 if (periph->periph_allocating) 3243 return; 3244 cam_periph_doacquire(periph); 3245 periph->periph_allocating = 1; 3246 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3247 device = periph->path->device; 3248 ccb = NULL; 3249 restart: 3250 while ((prio = min(periph->scheduled_priority, 3251 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3252 (periph->periph_allocated - (ccb != NULL ? 
1 : 0) < 3253 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3254 if (ccb == NULL && 3255 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3256 if (sleep) { 3257 ccb = xpt_get_ccb(periph); 3258 goto restart; 3259 } 3260 if (periph->flags & CAM_PERIPH_RUN_TASK) 3261 break; 3262 cam_periph_doacquire(periph); 3263 periph->flags |= CAM_PERIPH_RUN_TASK; 3264 taskqueue_enqueue(xsoftc.xpt_taskq, 3265 &periph->periph_run_task); 3266 break; 3267 } 3268 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3269 if (prio == periph->immediate_priority) { 3270 periph->immediate_priority = CAM_PRIORITY_NONE; 3271 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3272 ("waking cam_periph_getccb()\n")); 3273 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3274 periph_links.sle); 3275 wakeup(&periph->ccb_list); 3276 } else { 3277 periph->scheduled_priority = CAM_PRIORITY_NONE; 3278 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3279 ("calling periph_start()\n")); 3280 periph->periph_start(periph, ccb); 3281 } 3282 ccb = NULL; 3283 } 3284 if (ccb != NULL) 3285 xpt_release_ccb(ccb); 3286 periph->periph_allocating = 0; 3287 cam_periph_release_locked(periph); 3288 } 3289 3290 static void 3291 xpt_run_devq(struct cam_devq *devq) 3292 { 3293 struct mtx *mtx; 3294 3295 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3296 3297 devq->send_queue.qfrozen_cnt++; 3298 while ((devq->send_queue.entries > 0) 3299 && (devq->send_openings > 0) 3300 && (devq->send_queue.qfrozen_cnt <= 1)) { 3301 struct cam_ed *device; 3302 union ccb *work_ccb; 3303 struct cam_sim *sim; 3304 struct xpt_proto *proto; 3305 3306 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3307 CAMQ_HEAD); 3308 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3309 ("running device %p\n", device)); 3310 3311 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3312 if (work_ccb == NULL) { 3313 printf("device on run queue with no ccbs???\n"); 3314 continue; 3315 } 3316 3317 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3318 mtx_lock(&xsoftc.xpt_highpower_lock); 3319 if (xsoftc.num_highpower <= 0) { 3320 /* 3321 * We got a high power command, but we 3322 * don't have any available slots. Freeze 3323 * the device queue until we have a slot 3324 * available. 3325 */ 3326 xpt_freeze_devq_device(device, 1); 3327 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3328 highpowerq_entry); 3329 3330 mtx_unlock(&xsoftc.xpt_highpower_lock); 3331 continue; 3332 } else { 3333 /* 3334 * Consume a high power slot while 3335 * this ccb runs. 3336 */ 3337 xsoftc.num_highpower--; 3338 } 3339 mtx_unlock(&xsoftc.xpt_highpower_lock); 3340 } 3341 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3342 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3343 devq->send_openings--; 3344 devq->send_active++; 3345 xpt_schedule_devq(devq, device); 3346 mtx_unlock(&devq->send_mtx); 3347 3348 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3349 /* 3350 * The client wants to freeze the queue 3351 * after this CCB is sent. 3352 */ 3353 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3354 } 3355 3356 /* In Target mode, the peripheral driver knows best... */ 3357 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3358 if ((device->inq_flags & SID_CmdQue) != 0 3359 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3360 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3361 else 3362 /* 3363 * Clear this in case of a retried CCB that 3364 * failed due to a rejected tag. 
3365 */ 3366 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3367 } 3368 3369 KASSERT(device == work_ccb->ccb_h.path->device, 3370 ("device (%p) / path->device (%p) mismatch", 3371 device, work_ccb->ccb_h.path->device)); 3372 proto = xpt_proto_find(device->protocol); 3373 if (proto && proto->ops->debug_out) 3374 proto->ops->debug_out(work_ccb); 3375 3376 /* 3377 * Device queues can be shared among multiple SIM instances 3378 * that reside on different buses. Use the SIM from the 3379 * queued device, rather than the one from the calling bus. 3380 */ 3381 sim = device->sim; 3382 mtx = sim->mtx; 3383 if (mtx && !mtx_owned(mtx)) 3384 mtx_lock(mtx); 3385 else 3386 mtx = NULL; 3387 work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); 3388 (*(sim->sim_action))(sim, work_ccb); 3389 if (mtx) 3390 mtx_unlock(mtx); 3391 mtx_lock(&devq->send_mtx); 3392 } 3393 devq->send_queue.qfrozen_cnt--; 3394 } 3395 3396 /* 3397 * This function merges stuff from the src ccb into the dst ccb, while keeping 3398 * important fields in the dst ccb constant. 3399 */ 3400 void 3401 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb) 3402 { 3403 3404 /* 3405 * Pull fields that are valid for peripheral drivers to set 3406 * into the dst CCB along with the CCB "payload". 3407 */ 3408 dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count; 3409 dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code; 3410 dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout; 3411 dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags; 3412 bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1], 3413 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3414 } 3415 3416 void 3417 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3418 uint32_t priority, uint32_t flags) 3419 { 3420 3421 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3422 ccb_h->pinfo.priority = priority; 3423 ccb_h->path = path; 3424 ccb_h->path_id = path->bus->path_id; 3425 if (path->target) 3426 ccb_h->target_id = path->target->target_id; 3427 else 3428 ccb_h->target_id = CAM_TARGET_WILDCARD; 3429 if (path->device) { 3430 ccb_h->target_lun = path->device->lun_id; 3431 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3432 } else { 3433 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3434 } 3435 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3436 ccb_h->flags = flags; 3437 ccb_h->xflags = 0; 3438 } 3439 3440 void 3441 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority) 3442 { 3443 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3444 } 3445 3446 /* Path manipulation functions */ 3447 cam_status 3448 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3449 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3450 { 3451 struct cam_path *path; 3452 cam_status status; 3453 3454 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3455 3456 if (path == NULL) { 3457 status = CAM_RESRC_UNAVAIL; 3458 return(status); 3459 } 3460 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3461 if (status != CAM_REQ_CMP) { 3462 free(path, M_CAMPATH); 3463 path = NULL; 3464 } 3465 *new_path_ptr = path; 3466 return (status); 3467 } 3468 3469 cam_status 3470 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3471 struct cam_periph *periph, path_id_t path_id, 3472 target_id_t target_id, lun_id_t lun_id) 3473 { 3474 3475 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3476 lun_id)); 3477 } 3478 3479 cam_status 3480 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 
3481 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3482 { 3483 struct cam_eb *bus; 3484 struct cam_et *target; 3485 struct cam_ed *device; 3486 cam_status status; 3487 3488 status = CAM_REQ_CMP; /* Completed without error */ 3489 target = NULL; /* Wildcarded */ 3490 device = NULL; /* Wildcarded */ 3491 3492 /* 3493 * We will potentially modify the EDT, so block interrupts 3494 * that may attempt to create cam paths. 3495 */ 3496 bus = xpt_find_bus(path_id); 3497 if (bus == NULL) { 3498 status = CAM_PATH_INVALID; 3499 } else { 3500 xpt_lock_buses(); 3501 mtx_lock(&bus->eb_mtx); 3502 target = xpt_find_target(bus, target_id); 3503 if (target == NULL) { 3504 /* Create one */ 3505 struct cam_et *new_target; 3506 3507 new_target = xpt_alloc_target(bus, target_id); 3508 if (new_target == NULL) { 3509 status = CAM_RESRC_UNAVAIL; 3510 } else { 3511 target = new_target; 3512 } 3513 } 3514 xpt_unlock_buses(); 3515 if (target != NULL) { 3516 device = xpt_find_device(target, lun_id); 3517 if (device == NULL) { 3518 /* Create one */ 3519 struct cam_ed *new_device; 3520 3521 new_device = 3522 (*(bus->xport->ops->alloc_device))(bus, 3523 target, 3524 lun_id); 3525 if (new_device == NULL) { 3526 status = CAM_RESRC_UNAVAIL; 3527 } else { 3528 device = new_device; 3529 } 3530 } 3531 } 3532 mtx_unlock(&bus->eb_mtx); 3533 } 3534 3535 /* 3536 * Only touch the user's data if we are successful. 3537 */ 3538 if (status == CAM_REQ_CMP) { 3539 new_path->periph = perph; 3540 new_path->bus = bus; 3541 new_path->target = target; 3542 new_path->device = device; 3543 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3544 } else { 3545 if (device != NULL) 3546 xpt_release_device(device); 3547 if (target != NULL) 3548 xpt_release_target(target); 3549 if (bus != NULL) 3550 xpt_release_bus(bus); 3551 } 3552 return (status); 3553 } 3554 3555 int 3556 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3557 { 3558 struct cam_path *new_path; 3559 3560 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3561 if (new_path == NULL) 3562 return (ENOMEM); 3563 *new_path = *path; 3564 if (path->bus != NULL) 3565 xpt_acquire_bus(path->bus); 3566 if (path->target != NULL) 3567 xpt_acquire_target(path->target); 3568 if (path->device != NULL) 3569 xpt_acquire_device(path->device); 3570 *new_path_ptr = new_path; 3571 return (0); 3572 } 3573 3574 void 3575 xpt_release_path(struct cam_path *path) 3576 { 3577 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3578 if (path->device != NULL) { 3579 xpt_release_device(path->device); 3580 path->device = NULL; 3581 } 3582 if (path->target != NULL) { 3583 xpt_release_target(path->target); 3584 path->target = NULL; 3585 } 3586 if (path->bus != NULL) { 3587 xpt_release_bus(path->bus); 3588 path->bus = NULL; 3589 } 3590 } 3591 3592 void 3593 xpt_free_path(struct cam_path *path) 3594 { 3595 3596 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3597 xpt_release_path(path); 3598 free(path, M_CAMPATH); 3599 } 3600 3601 void 3602 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3603 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3604 { 3605 3606 xpt_lock_buses(); 3607 if (bus_ref) { 3608 if (path->bus) 3609 *bus_ref = path->bus->refcount; 3610 else 3611 *bus_ref = 0; 3612 } 3613 if (periph_ref) { 3614 if (path->periph) 3615 *periph_ref = path->periph->refcount; 3616 else 3617 *periph_ref = 0; 3618 } 3619 xpt_unlock_buses(); 3620 if (target_ref) { 3621 if (path->target) 3622 *target_ref = 
path->target->refcount; 3623 else 3624 *target_ref = 0; 3625 } 3626 if (device_ref) { 3627 if (path->device) 3628 *device_ref = path->device->refcount; 3629 else 3630 *device_ref = 0; 3631 } 3632 } 3633 3634 /* 3635 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3636 * in path1, 2 for match with wildcards in path2. 3637 */ 3638 int 3639 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3640 { 3641 int retval = 0; 3642 3643 if (path1->bus != path2->bus) { 3644 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3645 retval = 1; 3646 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3647 retval = 2; 3648 else 3649 return (-1); 3650 } 3651 if (path1->target != path2->target) { 3652 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3653 if (retval == 0) 3654 retval = 1; 3655 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3656 retval = 2; 3657 else 3658 return (-1); 3659 } 3660 if (path1->device != path2->device) { 3661 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3662 if (retval == 0) 3663 retval = 1; 3664 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3665 retval = 2; 3666 else 3667 return (-1); 3668 } 3669 return (retval); 3670 } 3671 3672 int 3673 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3674 { 3675 int retval = 0; 3676 3677 if (path->bus != dev->target->bus) { 3678 if (path->bus->path_id == CAM_BUS_WILDCARD) 3679 retval = 1; 3680 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3681 retval = 2; 3682 else 3683 return (-1); 3684 } 3685 if (path->target != dev->target) { 3686 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3687 if (retval == 0) 3688 retval = 1; 3689 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3690 retval = 2; 3691 else 3692 return (-1); 3693 } 3694 if (path->device != dev) { 3695 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3696 if (retval == 0) 3697 retval = 1; 3698 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3699 retval = 2; 3700 else 3701 return (-1); 3702 } 3703 return (retval); 3704 } 3705 3706 void 3707 xpt_print_path(struct cam_path *path) 3708 { 3709 struct sbuf sb; 3710 char buffer[XPT_PRINT_LEN]; 3711 3712 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3713 xpt_path_sbuf(path, &sb); 3714 sbuf_finish(&sb); 3715 printf("%s", sbuf_data(&sb)); 3716 sbuf_delete(&sb); 3717 } 3718 3719 static void 3720 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb) 3721 { 3722 if (device == NULL) 3723 sbuf_cat(sb, "(nopath): "); 3724 else { 3725 sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ", 3726 device->sim->sim_name, 3727 device->sim->unit_number, 3728 device->sim->bus_id, 3729 device->target->target_id, 3730 (uintmax_t)device->lun_id); 3731 } 3732 } 3733 3734 void 3735 xpt_print(struct cam_path *path, const char *fmt, ...) 
3736 { 3737 va_list ap; 3738 struct sbuf sb; 3739 char buffer[XPT_PRINT_LEN]; 3740 3741 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3742 3743 xpt_path_sbuf(path, &sb); 3744 va_start(ap, fmt); 3745 sbuf_vprintf(&sb, fmt, ap); 3746 va_end(ap); 3747 3748 sbuf_finish(&sb); 3749 printf("%s", sbuf_data(&sb)); 3750 sbuf_delete(&sb); 3751 } 3752 3753 char * 3754 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3755 { 3756 struct sbuf sb; 3757 3758 sbuf_new(&sb, str, str_len, 0); 3759 xpt_path_sbuf(path, &sb); 3760 sbuf_finish(&sb); 3761 return (str); 3762 } 3763 3764 void 3765 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) 3766 { 3767 3768 if (path == NULL) 3769 sbuf_cat(sb, "(nopath): "); 3770 else { 3771 if (path->periph != NULL) 3772 sbuf_printf(sb, "(%s%d:", path->periph->periph_name, 3773 path->periph->unit_number); 3774 else 3775 sbuf_cat(sb, "(noperiph:"); 3776 3777 if (path->bus != NULL) 3778 sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, 3779 path->bus->sim->unit_number, 3780 path->bus->sim->bus_id); 3781 else 3782 sbuf_cat(sb, "nobus:"); 3783 3784 if (path->target != NULL) 3785 sbuf_printf(sb, "%d:", path->target->target_id); 3786 else 3787 sbuf_cat(sb, "X:"); 3788 3789 if (path->device != NULL) 3790 sbuf_printf(sb, "%jx): ", 3791 (uintmax_t)path->device->lun_id); 3792 else 3793 sbuf_cat(sb, "X): "); 3794 } 3795 } 3796 3797 path_id_t 3798 xpt_path_path_id(struct cam_path *path) 3799 { 3800 return(path->bus->path_id); 3801 } 3802 3803 target_id_t 3804 xpt_path_target_id(struct cam_path *path) 3805 { 3806 if (path->target != NULL) 3807 return (path->target->target_id); 3808 else 3809 return (CAM_TARGET_WILDCARD); 3810 } 3811 3812 lun_id_t 3813 xpt_path_lun_id(struct cam_path *path) 3814 { 3815 if (path->device != NULL) 3816 return (path->device->lun_id); 3817 else 3818 return (CAM_LUN_WILDCARD); 3819 } 3820 3821 struct cam_sim * 3822 xpt_path_sim(struct cam_path *path) 3823 { 3824 3825 return (path->bus->sim); 3826 } 3827 3828 struct cam_periph* 3829 xpt_path_periph(struct cam_path *path) 3830 { 3831 3832 return (path->periph); 3833 } 3834 3835 /* 3836 * Release a CAM control block for the caller. Remit the cost of the structure 3837 * to the device referenced by the path. If this device had no 'credits' 3838 * and peripheral drivers have registered async callbacks for this notification, 3839 * call them now. 3840 */ 3841 void 3842 xpt_release_ccb(union ccb *free_ccb) 3843 { 3844 struct cam_ed *device; 3845 struct cam_periph *periph; 3846 3847 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3848 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3849 device = free_ccb->ccb_h.path->device; 3850 periph = free_ccb->ccb_h.path->periph; 3851 3852 xpt_free_ccb(free_ccb); 3853 periph->periph_allocated--; 3854 cam_ccbq_release_opening(&device->ccbq); 3855 xpt_run_allocq(periph, 0); 3856 } 3857 3858 /* Functions accessed by SIM drivers */ 3859 3860 static struct xpt_xport_ops xport_default_ops = { 3861 .alloc_device = xpt_alloc_device_default, 3862 .action = xpt_action_default, 3863 .async = xpt_dev_async_default, 3864 }; 3865 static struct xpt_xport xport_default = { 3866 .xport = XPORT_UNKNOWN, 3867 .name = "unknown", 3868 .ops = &xport_default_ops, 3869 }; 3870 3871 CAM_XPT_XPORT(xport_default); 3872 3873 /* 3874 * A sim structure, listing the SIM entry points and instance 3875 * identification info, is passed to xpt_bus_register to hook the SIM 3876 * into the CAM framework.
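 *
 * A SIM driver's attach routine typically does something along these lines
 * (an illustrative sketch only; "foo_action", "foo_poll" and FOO_MAXIO are
 * hypothetical):
 *
 *	devq = cam_simq_alloc(FOO_MAXIO);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, FOO_MAXIO, devq);
 *	if (sim == NULL || xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		... handle the failure ...
 *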
xpt_bus_register creates a cam_eb entry 3877 * for this new bus and places it in the array of buses and assigns 3878 * it a path_id. The path_id may be influenced by "hard wiring" 3879 * information specified by the user. Once interrupt services are 3880 * available, the bus will be probed. 3881 */ 3882 int 3883 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus) 3884 { 3885 struct cam_eb *new_bus; 3886 struct cam_eb *old_bus; 3887 struct ccb_pathinq cpi; 3888 struct cam_path *path; 3889 cam_status status; 3890 3891 sim->bus_id = bus; 3892 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3893 M_CAMXPT, M_NOWAIT|M_ZERO); 3894 if (new_bus == NULL) { 3895 /* Couldn't satisfy request */ 3896 return (ENOMEM); 3897 } 3898 3899 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3900 TAILQ_INIT(&new_bus->et_entries); 3901 cam_sim_hold(sim); 3902 new_bus->sim = sim; 3903 timevalclear(&new_bus->last_reset); 3904 new_bus->flags = 0; 3905 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3906 new_bus->generation = 0; 3907 new_bus->parent_dev = parent; 3908 3909 xpt_lock_buses(); 3910 sim->path_id = new_bus->path_id = 3911 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3912 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3913 while (old_bus != NULL 3914 && old_bus->path_id < new_bus->path_id) 3915 old_bus = TAILQ_NEXT(old_bus, links); 3916 if (old_bus != NULL) 3917 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3918 else 3919 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3920 xsoftc.bus_generation++; 3921 xpt_unlock_buses(); 3922 3923 /* 3924 * Set a default transport so that a PATH_INQ can be issued to 3925 * the SIM. This will then allow for probing and attaching of 3926 * a more appropriate transport. 3927 */ 3928 new_bus->xport = &xport_default; 3929 3930 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3931 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3932 if (status != CAM_REQ_CMP) { 3933 xpt_release_bus(new_bus); 3934 return (ENOMEM); 3935 } 3936 3937 xpt_path_inq(&cpi, path); 3938 3939 /* 3940 * Use the results of PATH_INQ to pick a transport. Note that 3941 * the xpt bus (which uses XPORT_UNSPECIFIED) always uses 3942 * xport_default instead of a transport from 3943 * cam_xpt_port_set. 3944 */ 3945 if (cam_ccb_success((union ccb *)&cpi) && 3946 cpi.transport != XPORT_UNSPECIFIED) { 3947 struct xpt_xport **xpt; 3948 3949 SET_FOREACH(xpt, cam_xpt_xport_set) { 3950 if ((*xpt)->xport == cpi.transport) { 3951 new_bus->xport = *xpt; 3952 break; 3953 } 3954 } 3955 if (new_bus->xport == &xport_default) { 3956 xpt_print(path, 3957 "No transport found for %d\n", cpi.transport); 3958 xpt_release_bus(new_bus); 3959 xpt_free_path(path); 3960 return (EINVAL); 3961 } 3962 } 3963 3964 /* Notify interested parties */ 3965 if (sim->path_id != CAM_XPT_PATH_ID) { 3966 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3967 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3968 union ccb *scan_ccb; 3969 3970 /* Initiate bus rescan. 
*/ 3971 scan_ccb = xpt_alloc_ccb_nowait(); 3972 if (scan_ccb != NULL) { 3973 scan_ccb->ccb_h.path = path; 3974 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3975 scan_ccb->crcn.flags = 0; 3976 xpt_rescan(scan_ccb); 3977 } else { 3978 xpt_print(path, 3979 "Can't allocate CCB to scan bus\n"); 3980 xpt_free_path(path); 3981 } 3982 } else 3983 xpt_free_path(path); 3984 } else 3985 xpt_free_path(path); 3986 return (CAM_SUCCESS); 3987 } 3988 3989 int 3990 xpt_bus_deregister(path_id_t pathid) 3991 { 3992 struct cam_path bus_path; 3993 cam_status status; 3994 3995 status = xpt_compile_path(&bus_path, NULL, pathid, 3996 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3997 if (status != CAM_REQ_CMP) 3998 return (ENOMEM); 3999 4000 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 4001 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 4002 4003 /* Release the reference count held while registered. */ 4004 xpt_release_bus(bus_path.bus); 4005 xpt_release_path(&bus_path); 4006 4007 return (CAM_SUCCESS); 4008 } 4009 4010 static path_id_t 4011 xptnextfreepathid(void) 4012 { 4013 struct cam_eb *bus; 4014 path_id_t pathid; 4015 const char *strval; 4016 4017 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4018 pathid = 0; 4019 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4020 retry: 4021 /* Find an unoccupied pathid */ 4022 while (bus != NULL && bus->path_id <= pathid) { 4023 if (bus->path_id == pathid) 4024 pathid++; 4025 bus = TAILQ_NEXT(bus, links); 4026 } 4027 4028 /* 4029 * Ensure that this pathid is not reserved for 4030 * a bus that may be registered in the future. 4031 */ 4032 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4033 ++pathid; 4034 /* Start the search over */ 4035 goto retry; 4036 } 4037 return (pathid); 4038 } 4039 4040 static path_id_t 4041 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4042 { 4043 path_id_t pathid; 4044 int i, dunit, val; 4045 char buf[32]; 4046 const char *dname; 4047 4048 pathid = CAM_XPT_PATH_ID; 4049 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4050 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4051 return (pathid); 4052 i = 0; 4053 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4054 if (strcmp(dname, "scbus")) { 4055 /* Avoid a bit of foot shooting. */ 4056 continue; 4057 } 4058 if (dunit < 0) /* unwired?! */ 4059 continue; 4060 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4061 if (sim_bus == val) { 4062 pathid = dunit; 4063 break; 4064 } 4065 } else if (sim_bus == 0) { 4066 /* Unspecified matches bus 0 */ 4067 pathid = dunit; 4068 break; 4069 } else { 4070 printf( 4071 "Ambiguous scbus configuration for %s%d bus %d, cannot wire down. 
The kernel\n" 4072 "config entry for scbus%d should specify a controller bus.\n" 4073 "Scbus will be assigned dynamically.\n", 4074 sim_name, sim_unit, sim_bus, dunit); 4075 break; 4076 } 4077 } 4078 4079 if (pathid == CAM_XPT_PATH_ID) 4080 pathid = xptnextfreepathid(); 4081 return (pathid); 4082 } 4083 4084 static const char * 4085 xpt_async_string(uint32_t async_code) 4086 { 4087 4088 switch (async_code) { 4089 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4090 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4091 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4092 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4093 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4094 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4095 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4096 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4097 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4098 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4099 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4100 case AC_CONTRACT: return ("AC_CONTRACT"); 4101 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4102 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4103 } 4104 return ("AC_UNKNOWN"); 4105 } 4106 4107 static int 4108 xpt_async_size(uint32_t async_code) 4109 { 4110 4111 switch (async_code) { 4112 case AC_BUS_RESET: return (0); 4113 case AC_UNSOL_RESEL: return (0); 4114 case AC_SCSI_AEN: return (0); 4115 case AC_SENT_BDR: return (0); 4116 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4117 case AC_PATH_DEREGISTERED: return (0); 4118 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4119 case AC_LOST_DEVICE: return (0); 4120 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4121 case AC_INQ_CHANGED: return (0); 4122 case AC_GETDEV_CHANGED: return (0); 4123 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4124 case AC_ADVINFO_CHANGED: return (-1); 4125 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4126 } 4127 return (0); 4128 } 4129 4130 static int 4131 xpt_async_process_dev(struct cam_ed *device, void *arg) 4132 { 4133 union ccb *ccb = arg; 4134 struct cam_path *path = ccb->ccb_h.path; 4135 void *async_arg = ccb->casync.async_arg_ptr; 4136 uint32_t async_code = ccb->casync.async_code; 4137 bool relock; 4138 4139 if (path->device != device 4140 && path->device->lun_id != CAM_LUN_WILDCARD 4141 && device->lun_id != CAM_LUN_WILDCARD) 4142 return (1); 4143 4144 /* 4145 * The async callback could free the device. 4146 * If it is a broadcast async, it doesn't hold 4147 * device reference, so take our own reference. 4148 */ 4149 xpt_acquire_device(device); 4150 4151 /* 4152 * If async for specific device is to be delivered to 4153 * the wildcard client, take the specific device lock. 4154 * XXX: We may need a way for client to specify it. 
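 * In that case the (wildcard) device's mutex held by our caller is
 * dropped and the event path's own device lock is taken around the
 * callbacks, then the locks are swapped back before returning.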
4155 */ 4156 if ((device->lun_id == CAM_LUN_WILDCARD && 4157 path->device->lun_id != CAM_LUN_WILDCARD) || 4158 (device->target->target_id == CAM_TARGET_WILDCARD && 4159 path->target->target_id != CAM_TARGET_WILDCARD) || 4160 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4161 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4162 mtx_unlock(&device->device_mtx); 4163 xpt_path_lock(path); 4164 relock = true; 4165 } else 4166 relock = false; 4167 4168 (*(device->target->bus->xport->ops->async))(async_code, 4169 device->target->bus, device->target, device, async_arg); 4170 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4171 4172 if (relock) { 4173 xpt_path_unlock(path); 4174 mtx_lock(&device->device_mtx); 4175 } 4176 xpt_release_device(device); 4177 return (1); 4178 } 4179 4180 static int 4181 xpt_async_process_tgt(struct cam_et *target, void *arg) 4182 { 4183 union ccb *ccb = arg; 4184 struct cam_path *path = ccb->ccb_h.path; 4185 4186 if (path->target != target 4187 && path->target->target_id != CAM_TARGET_WILDCARD 4188 && target->target_id != CAM_TARGET_WILDCARD) 4189 return (1); 4190 4191 if (ccb->casync.async_code == AC_SENT_BDR) { 4192 /* Update our notion of when the last reset occurred */ 4193 microtime(&target->last_reset); 4194 } 4195 4196 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4197 } 4198 4199 static void 4200 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4201 { 4202 struct cam_eb *bus; 4203 struct cam_path *path; 4204 void *async_arg; 4205 uint32_t async_code; 4206 4207 path = ccb->ccb_h.path; 4208 async_code = ccb->casync.async_code; 4209 async_arg = ccb->casync.async_arg_ptr; 4210 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4211 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4212 bus = path->bus; 4213 4214 if (async_code == AC_BUS_RESET) { 4215 /* Update our notion of when the last reset occurred */ 4216 microtime(&bus->last_reset); 4217 } 4218 4219 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4220 4221 /* 4222 * If this wasn't a fully wildcarded async, tell all 4223 * clients that want all async events. 4224 */ 4225 if (bus != xpt_periph->path->bus) { 4226 xpt_path_lock(xpt_periph->path); 4227 xpt_async_process_dev(xpt_periph->path->device, ccb); 4228 xpt_path_unlock(xpt_periph->path); 4229 } 4230 4231 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4232 xpt_release_devq(path, 1, TRUE); 4233 else 4234 xpt_release_simq(path->bus->sim, TRUE); 4235 if (ccb->casync.async_arg_size > 0) 4236 free(async_arg, M_CAMXPT); 4237 xpt_free_path(path); 4238 xpt_free_ccb(ccb); 4239 } 4240 4241 static void 4242 xpt_async_bcast(struct async_list *async_head, 4243 uint32_t async_code, 4244 struct cam_path *path, void *async_arg) 4245 { 4246 struct async_node *cur_entry; 4247 struct mtx *mtx; 4248 4249 cur_entry = SLIST_FIRST(async_head); 4250 while (cur_entry != NULL) { 4251 struct async_node *next_entry; 4252 /* 4253 * Grab the next list entry before we call the current 4254 * entry's callback. This is because the callback function 4255 * can delete its async callback entry. 4256 */ 4257 next_entry = SLIST_NEXT(cur_entry, links); 4258 if ((cur_entry->event_enable & async_code) != 0) { 4259 mtx = cur_entry->event_lock ? 
4260 path->device->sim->mtx : NULL; 4261 if (mtx) 4262 mtx_lock(mtx); 4263 cur_entry->callback(cur_entry->callback_arg, 4264 async_code, path, 4265 async_arg); 4266 if (mtx) 4267 mtx_unlock(mtx); 4268 } 4269 cur_entry = next_entry; 4270 } 4271 } 4272 4273 void 4274 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg) 4275 { 4276 union ccb *ccb; 4277 int size; 4278 4279 ccb = xpt_alloc_ccb_nowait(); 4280 if (ccb == NULL) { 4281 xpt_print(path, "Can't allocate CCB to send %s\n", 4282 xpt_async_string(async_code)); 4283 return; 4284 } 4285 4286 if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) { 4287 xpt_print(path, "Can't allocate path to send %s\n", 4288 xpt_async_string(async_code)); 4289 xpt_free_ccb(ccb); 4290 return; 4291 } 4292 ccb->ccb_h.path->periph = NULL; 4293 ccb->ccb_h.func_code = XPT_ASYNC; 4294 ccb->ccb_h.cbfcnp = xpt_async_process; 4295 ccb->ccb_h.flags |= CAM_UNLOCKED; 4296 ccb->casync.async_code = async_code; 4297 ccb->casync.async_arg_size = 0; 4298 size = xpt_async_size(async_code); 4299 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4300 ("xpt_async: func %#x %s async_code %d %s\n", 4301 ccb->ccb_h.func_code, 4302 xpt_action_name(ccb->ccb_h.func_code), 4303 async_code, 4304 xpt_async_string(async_code))); 4305 if (size > 0 && async_arg != NULL) { 4306 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4307 if (ccb->casync.async_arg_ptr == NULL) { 4308 xpt_print(path, "Can't allocate argument to send %s\n", 4309 xpt_async_string(async_code)); 4310 xpt_free_path(ccb->ccb_h.path); 4311 xpt_free_ccb(ccb); 4312 return; 4313 } 4314 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4315 ccb->casync.async_arg_size = size; 4316 } else if (size < 0) { 4317 ccb->casync.async_arg_ptr = async_arg; 4318 ccb->casync.async_arg_size = size; 4319 } 4320 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4321 xpt_freeze_devq(path, 1); 4322 else 4323 xpt_freeze_simq(path->bus->sim, 1); 4324 xpt_action(ccb); 4325 } 4326 4327 static void 4328 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus, 4329 struct cam_et *target, struct cam_ed *device, 4330 void *async_arg) 4331 { 4332 4333 /* 4334 * We only need to handle events for real devices. 4335 */ 4336 if (target->target_id == CAM_TARGET_WILDCARD 4337 || device->lun_id == CAM_LUN_WILDCARD) 4338 return; 4339 4340 printf("%s called\n", __func__); 4341 } 4342 4343 static uint32_t 4344 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4345 { 4346 struct cam_devq *devq; 4347 uint32_t freeze; 4348 4349 devq = dev->sim->devq; 4350 mtx_assert(&devq->send_mtx, MA_OWNED); 4351 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4352 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4353 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4354 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4355 /* Remove frozen device from sendq.
*/ 4356 if (device_is_queued(dev)) 4357 camq_remove(&devq->send_queue, dev->devq_entry.index); 4358 return (freeze); 4359 } 4360 4361 uint32_t 4362 xpt_freeze_devq(struct cam_path *path, u_int count) 4363 { 4364 struct cam_ed *dev = path->device; 4365 struct cam_devq *devq; 4366 uint32_t freeze; 4367 4368 devq = dev->sim->devq; 4369 mtx_lock(&devq->send_mtx); 4370 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4371 freeze = xpt_freeze_devq_device(dev, count); 4372 mtx_unlock(&devq->send_mtx); 4373 return (freeze); 4374 } 4375 4376 uint32_t 4377 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4378 { 4379 struct cam_devq *devq; 4380 uint32_t freeze; 4381 4382 devq = sim->devq; 4383 mtx_lock(&devq->send_mtx); 4384 freeze = (devq->send_queue.qfrozen_cnt += count); 4385 mtx_unlock(&devq->send_mtx); 4386 return (freeze); 4387 } 4388 4389 static void 4390 xpt_release_devq_timeout(void *arg) 4391 { 4392 struct cam_ed *dev; 4393 struct cam_devq *devq; 4394 4395 dev = (struct cam_ed *)arg; 4396 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4397 devq = dev->sim->devq; 4398 mtx_assert(&devq->send_mtx, MA_OWNED); 4399 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4400 xpt_run_devq(devq); 4401 } 4402 4403 void 4404 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4405 { 4406 struct cam_ed *dev; 4407 struct cam_devq *devq; 4408 4409 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4410 count, run_queue)); 4411 dev = path->device; 4412 devq = dev->sim->devq; 4413 mtx_lock(&devq->send_mtx); 4414 if (xpt_release_devq_device(dev, count, run_queue)) 4415 xpt_run_devq(dev->sim->devq); 4416 mtx_unlock(&devq->send_mtx); 4417 } 4418 4419 static int 4420 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4421 { 4422 4423 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4424 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4425 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4426 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4427 if (count > dev->ccbq.queue.qfrozen_cnt) { 4428 #ifdef INVARIANTS 4429 printf("xpt_release_devq(): requested %u > present %u\n", 4430 count, dev->ccbq.queue.qfrozen_cnt); 4431 #endif 4432 count = dev->ccbq.queue.qfrozen_cnt; 4433 } 4434 dev->ccbq.queue.qfrozen_cnt -= count; 4435 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4436 /* 4437 * No longer need to wait for a successful 4438 * command completion. 4439 */ 4440 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4441 /* 4442 * Remove any timeouts that might be scheduled 4443 * to release this queue. 4444 */ 4445 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4446 callout_stop(&dev->callout); 4447 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4448 } 4449 /* 4450 * Now that we are unfrozen schedule the 4451 * device so any pending transactions are 4452 * run. 4453 */ 4454 xpt_schedule_devq(dev->sim->devq, dev); 4455 } else 4456 run_queue = 0; 4457 return (run_queue); 4458 } 4459 4460 void 4461 xpt_release_simq(struct cam_sim *sim, int run_queue) 4462 { 4463 struct cam_devq *devq; 4464 4465 devq = sim->devq; 4466 mtx_lock(&devq->send_mtx); 4467 if (devq->send_queue.qfrozen_cnt <= 0) { 4468 #ifdef INVARIANTS 4469 printf("xpt_release_simq: requested 1 > present %u\n", 4470 devq->send_queue.qfrozen_cnt); 4471 #endif 4472 } else 4473 devq->send_queue.qfrozen_cnt--; 4474 if (devq->send_queue.qfrozen_cnt == 0) { 4475 if (run_queue) { 4476 /* 4477 * Now that we are unfrozen run the send queue. 
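 * With run_queue == FALSE the queue is not run here and is left to
 * the caller (xpt_done_process(), for example, releases the simq this
 * way and runs the device queue itself).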
4478 */ 4479 xpt_run_devq(sim->devq); 4480 } 4481 } 4482 mtx_unlock(&devq->send_mtx); 4483 } 4484 4485 void 4486 xpt_done(union ccb *done_ccb) 4487 { 4488 struct cam_doneq *queue; 4489 int run, hash; 4490 4491 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4492 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4493 done_ccb->csio.bio != NULL) 4494 biotrack(done_ccb->csio.bio, __func__); 4495 #endif 4496 4497 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4498 ("xpt_done: func= %#x %s status %#x\n", 4499 done_ccb->ccb_h.func_code, 4500 xpt_action_name(done_ccb->ccb_h.func_code), 4501 done_ccb->ccb_h.status)); 4502 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4503 return; 4504 4505 /* Store the time the ccb was in the sim */ 4506 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4507 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4508 hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4509 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4510 queue = &cam_doneqs[hash]; 4511 mtx_lock(&queue->cam_doneq_mtx); 4512 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4513 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4514 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4515 mtx_unlock(&queue->cam_doneq_mtx); 4516 if (run && !dumping) 4517 wakeup(&queue->cam_doneq); 4518 } 4519 4520 void 4521 xpt_done_direct(union ccb *done_ccb) 4522 { 4523 4524 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4525 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4526 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4527 return; 4528 4529 /* Store the time the ccb was in the sim */ 4530 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4531 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4532 xpt_done_process(&done_ccb->ccb_h); 4533 } 4534 4535 union ccb * 4536 xpt_alloc_ccb(void) 4537 { 4538 union ccb *new_ccb; 4539 4540 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4541 return (new_ccb); 4542 } 4543 4544 union ccb * 4545 xpt_alloc_ccb_nowait(void) 4546 { 4547 union ccb *new_ccb; 4548 4549 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4550 return (new_ccb); 4551 } 4552 4553 void 4554 xpt_free_ccb(union ccb *free_ccb) 4555 { 4556 struct cam_periph *periph; 4557 4558 if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) { 4559 /* 4560 * Looks like a CCB allocated from a periph UMA zone. 4561 */ 4562 periph = free_ccb->ccb_h.path->periph; 4563 uma_zfree(periph->ccb_zone, free_ccb); 4564 } else { 4565 free(free_ccb, M_CAMCCB); 4566 } 4567 } 4568 4569 /* Private XPT functions */ 4570 4571 /* 4572 * Get a CAM control block for the caller. Charge the structure to the device 4573 * referenced by the path. If we don't have sufficient resources to allocate 4574 * more ccbs, we return NULL. 
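 *
 * This is the non-sleeping variant; xpt_get_ccb() below may sleep for
 * memory.  Peripheral drivers do not call either directly; they obtain
 * CCBs through cam_periph_getccb(), roughly (sketch, periph lock held):
 *
 *	union ccb *ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in the request and submit it with xpt_action(ccb) ...
 *	xpt_release_ccb(ccb);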
4575 */ 4576 static union ccb * 4577 xpt_get_ccb_nowait(struct cam_periph *periph) 4578 { 4579 union ccb *new_ccb; 4580 int alloc_flags; 4581 4582 if (periph->ccb_zone != NULL) { 4583 alloc_flags = CAM_CCB_FROM_UMA; 4584 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT); 4585 } else { 4586 alloc_flags = 0; 4587 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4588 } 4589 if (new_ccb == NULL) 4590 return (NULL); 4591 new_ccb->ccb_h.alloc_flags = alloc_flags; 4592 periph->periph_allocated++; 4593 cam_ccbq_take_opening(&periph->path->device->ccbq); 4594 return (new_ccb); 4595 } 4596 4597 static union ccb * 4598 xpt_get_ccb(struct cam_periph *periph) 4599 { 4600 union ccb *new_ccb; 4601 int alloc_flags; 4602 4603 cam_periph_unlock(periph); 4604 if (periph->ccb_zone != NULL) { 4605 alloc_flags = CAM_CCB_FROM_UMA; 4606 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK); 4607 } else { 4608 alloc_flags = 0; 4609 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4610 } 4611 new_ccb->ccb_h.alloc_flags = alloc_flags; 4612 cam_periph_lock(periph); 4613 periph->periph_allocated++; 4614 cam_ccbq_take_opening(&periph->path->device->ccbq); 4615 return (new_ccb); 4616 } 4617 4618 union ccb * 4619 cam_periph_getccb(struct cam_periph *periph, uint32_t priority) 4620 { 4621 struct ccb_hdr *ccb_h; 4622 4623 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4624 cam_periph_assert(periph, MA_OWNED); 4625 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4626 ccb_h->pinfo.priority != priority) { 4627 if (priority < periph->immediate_priority) { 4628 periph->immediate_priority = priority; 4629 xpt_run_allocq(periph, 0); 4630 } else 4631 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4632 "cgticb", 0); 4633 } 4634 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4635 return ((union ccb *)ccb_h); 4636 } 4637 4638 static void 4639 xpt_acquire_bus(struct cam_eb *bus) 4640 { 4641 4642 xpt_lock_buses(); 4643 bus->refcount++; 4644 xpt_unlock_buses(); 4645 } 4646 4647 static void 4648 xpt_release_bus(struct cam_eb *bus) 4649 { 4650 4651 xpt_lock_buses(); 4652 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4653 if (--bus->refcount > 0) { 4654 xpt_unlock_buses(); 4655 return; 4656 } 4657 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4658 xsoftc.bus_generation++; 4659 xpt_unlock_buses(); 4660 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4661 ("destroying bus, but target list is not empty")); 4662 cam_sim_release(bus->sim); 4663 mtx_destroy(&bus->eb_mtx); 4664 free(bus, M_CAMXPT); 4665 } 4666 4667 static struct cam_et * 4668 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4669 { 4670 struct cam_et *cur_target, *target; 4671 4672 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4673 mtx_assert(&bus->eb_mtx, MA_OWNED); 4674 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4675 M_NOWAIT|M_ZERO); 4676 if (target == NULL) 4677 return (NULL); 4678 4679 TAILQ_INIT(&target->ed_entries); 4680 target->bus = bus; 4681 target->target_id = target_id; 4682 target->refcount = 1; 4683 target->generation = 0; 4684 target->luns = NULL; 4685 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4686 timevalclear(&target->last_reset); 4687 /* 4688 * Hold a reference to our parent bus so it 4689 * will not go away before we do. 
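 * The reference is dropped again when the target is destroyed in
 * xpt_release_target(), via xpt_release_bus().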
4690 */ 4691 bus->refcount++; 4692 4693 /* Insertion sort into our bus's target list */ 4694 cur_target = TAILQ_FIRST(&bus->et_entries); 4695 while (cur_target != NULL && cur_target->target_id < target_id) 4696 cur_target = TAILQ_NEXT(cur_target, links); 4697 if (cur_target != NULL) { 4698 TAILQ_INSERT_BEFORE(cur_target, target, links); 4699 } else { 4700 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4701 } 4702 bus->generation++; 4703 return (target); 4704 } 4705 4706 static void 4707 xpt_acquire_target(struct cam_et *target) 4708 { 4709 struct cam_eb *bus = target->bus; 4710 4711 mtx_lock(&bus->eb_mtx); 4712 target->refcount++; 4713 mtx_unlock(&bus->eb_mtx); 4714 } 4715 4716 static void 4717 xpt_release_target(struct cam_et *target) 4718 { 4719 struct cam_eb *bus = target->bus; 4720 4721 mtx_lock(&bus->eb_mtx); 4722 if (--target->refcount > 0) { 4723 mtx_unlock(&bus->eb_mtx); 4724 return; 4725 } 4726 TAILQ_REMOVE(&bus->et_entries, target, links); 4727 bus->generation++; 4728 mtx_unlock(&bus->eb_mtx); 4729 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4730 ("destroying target, but device list is not empty")); 4731 xpt_release_bus(bus); 4732 mtx_destroy(&target->luns_mtx); 4733 if (target->luns) 4734 free(target->luns, M_CAMXPT); 4735 free(target, M_CAMXPT); 4736 } 4737 4738 static struct cam_ed * 4739 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4740 lun_id_t lun_id) 4741 { 4742 struct cam_ed *device; 4743 4744 device = xpt_alloc_device(bus, target, lun_id); 4745 if (device == NULL) 4746 return (NULL); 4747 4748 device->mintags = 1; 4749 device->maxtags = 1; 4750 return (device); 4751 } 4752 4753 static void 4754 xpt_destroy_device(void *context, int pending) 4755 { 4756 struct cam_ed *device = context; 4757 4758 mtx_lock(&device->device_mtx); 4759 mtx_destroy(&device->device_mtx); 4760 free(device, M_CAMDEV); 4761 } 4762 4763 struct cam_ed * 4764 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4765 { 4766 struct cam_ed *cur_device, *device; 4767 struct cam_devq *devq; 4768 cam_status status; 4769 4770 mtx_assert(&bus->eb_mtx, MA_OWNED); 4771 /* Make space for us in the device queue on our bus */ 4772 devq = bus->sim->devq; 4773 mtx_lock(&devq->send_mtx); 4774 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4775 mtx_unlock(&devq->send_mtx); 4776 if (status != CAM_REQ_CMP) 4777 return (NULL); 4778 4779 device = (struct cam_ed *)malloc(sizeof(*device), 4780 M_CAMDEV, M_NOWAIT|M_ZERO); 4781 if (device == NULL) 4782 return (NULL); 4783 4784 cam_init_pinfo(&device->devq_entry); 4785 device->target = target; 4786 device->lun_id = lun_id; 4787 device->sim = bus->sim; 4788 if (cam_ccbq_init(&device->ccbq, 4789 bus->sim->max_dev_openings) != 0) { 4790 free(device, M_CAMDEV); 4791 return (NULL); 4792 } 4793 SLIST_INIT(&device->asyncs); 4794 SLIST_INIT(&device->periphs); 4795 device->generation = 0; 4796 device->flags = CAM_DEV_UNCONFIGURED; 4797 device->tag_delay_count = 0; 4798 device->tag_saved_openings = 0; 4799 device->refcount = 1; 4800 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4801 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4802 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4803 /* 4804 * Hold a reference to our parent bus so it 4805 * will not go away before we do. 
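 * (The reference taken below is on the parent target; it is dropped
 * again when the device is destroyed in xpt_release_device(), via
 * xpt_release_target().)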
4806 */ 4807 target->refcount++; 4808 4809 cur_device = TAILQ_FIRST(&target->ed_entries); 4810 while (cur_device != NULL && cur_device->lun_id < lun_id) 4811 cur_device = TAILQ_NEXT(cur_device, links); 4812 if (cur_device != NULL) 4813 TAILQ_INSERT_BEFORE(cur_device, device, links); 4814 else 4815 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4816 target->generation++; 4817 return (device); 4818 } 4819 4820 void 4821 xpt_acquire_device(struct cam_ed *device) 4822 { 4823 struct cam_eb *bus = device->target->bus; 4824 4825 mtx_lock(&bus->eb_mtx); 4826 device->refcount++; 4827 mtx_unlock(&bus->eb_mtx); 4828 } 4829 4830 void 4831 xpt_release_device(struct cam_ed *device) 4832 { 4833 struct cam_eb *bus = device->target->bus; 4834 struct cam_devq *devq; 4835 4836 mtx_lock(&bus->eb_mtx); 4837 if (--device->refcount > 0) { 4838 mtx_unlock(&bus->eb_mtx); 4839 return; 4840 } 4841 4842 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4843 device->target->generation++; 4844 mtx_unlock(&bus->eb_mtx); 4845 4846 /* Release our slot in the devq */ 4847 devq = bus->sim->devq; 4848 mtx_lock(&devq->send_mtx); 4849 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4850 4851 KASSERT(SLIST_EMPTY(&device->periphs), 4852 ("destroying device, but periphs list is not empty")); 4853 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4854 ("destroying device while still queued for ccbs")); 4855 4856 /* The send_mtx must be held when accessing the callout */ 4857 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4858 callout_stop(&device->callout); 4859 4860 mtx_unlock(&devq->send_mtx); 4861 4862 xpt_release_target(device->target); 4863 4864 cam_ccbq_fini(&device->ccbq); 4865 /* 4866 * Free allocated memory. free(9) does nothing if the 4867 * supplied pointer is NULL, so it is safe to call without 4868 * checking. 
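 * The cam_ed structure itself is not freed here; the final mutex
 * destroy and free are deferred to xpt_destroy_device(), run from the
 * xpt taskqueue (see the taskqueue_enqueue() below).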
4869 */ 4870 free(device->supported_vpds, M_CAMXPT); 4871 free(device->device_id, M_CAMXPT); 4872 free(device->ext_inq, M_CAMXPT); 4873 free(device->physpath, M_CAMXPT); 4874 free(device->rcap_buf, M_CAMXPT); 4875 free(device->serial_num, M_CAMXPT); 4876 free(device->nvme_data, M_CAMXPT); 4877 free(device->nvme_cdata, M_CAMXPT); 4878 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4879 } 4880 4881 uint32_t 4882 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4883 { 4884 int result; 4885 struct cam_ed *dev; 4886 4887 dev = path->device; 4888 mtx_lock(&dev->sim->devq->send_mtx); 4889 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4890 mtx_unlock(&dev->sim->devq->send_mtx); 4891 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4892 || (dev->inq_flags & SID_CmdQue) != 0) 4893 dev->tag_saved_openings = newopenings; 4894 return (result); 4895 } 4896 4897 static struct cam_eb * 4898 xpt_find_bus(path_id_t path_id) 4899 { 4900 struct cam_eb *bus; 4901 4902 xpt_lock_buses(); 4903 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4904 bus != NULL; 4905 bus = TAILQ_NEXT(bus, links)) { 4906 if (bus->path_id == path_id) { 4907 bus->refcount++; 4908 break; 4909 } 4910 } 4911 xpt_unlock_buses(); 4912 return (bus); 4913 } 4914 4915 static struct cam_et * 4916 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4917 { 4918 struct cam_et *target; 4919 4920 mtx_assert(&bus->eb_mtx, MA_OWNED); 4921 for (target = TAILQ_FIRST(&bus->et_entries); 4922 target != NULL; 4923 target = TAILQ_NEXT(target, links)) { 4924 if (target->target_id == target_id) { 4925 target->refcount++; 4926 break; 4927 } 4928 } 4929 return (target); 4930 } 4931 4932 static struct cam_ed * 4933 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4934 { 4935 struct cam_ed *device; 4936 4937 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4938 for (device = TAILQ_FIRST(&target->ed_entries); 4939 device != NULL; 4940 device = TAILQ_NEXT(device, links)) { 4941 if (device->lun_id == lun_id) { 4942 device->refcount++; 4943 break; 4944 } 4945 } 4946 return (device); 4947 } 4948 4949 void 4950 xpt_start_tags(struct cam_path *path) 4951 { 4952 struct ccb_relsim crs; 4953 struct cam_ed *device; 4954 struct cam_sim *sim; 4955 int newopenings; 4956 4957 device = path->device; 4958 sim = path->bus->sim; 4959 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4960 xpt_freeze_devq(path, /*count*/1); 4961 device->inq_flags |= SID_CmdQue; 4962 if (device->tag_saved_openings != 0) 4963 newopenings = device->tag_saved_openings; 4964 else 4965 newopenings = min(device->maxtags, 4966 sim->max_tagged_dev_openings); 4967 xpt_dev_ccbq_resize(path, newopenings); 4968 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4969 memset(&crs, 0, sizeof(crs)); 4970 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4971 crs.ccb_h.func_code = XPT_REL_SIMQ; 4972 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4973 crs.openings 4974 = crs.release_timeout 4975 = crs.qfrozen_cnt 4976 = 0; 4977 xpt_action((union ccb *)&crs); 4978 } 4979 4980 void 4981 xpt_stop_tags(struct cam_path *path) 4982 { 4983 struct ccb_relsim crs; 4984 struct cam_ed *device; 4985 struct cam_sim *sim; 4986 4987 device = path->device; 4988 sim = path->bus->sim; 4989 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4990 device->tag_delay_count = 0; 4991 xpt_freeze_devq(path, /*count*/1); 4992 device->inq_flags &= ~SID_CmdQue; 4993 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4994 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4995 memset(&crs, 0, sizeof(crs)); 4996 xpt_setup_ccb(&crs.ccb_h, 
path, CAM_PRIORITY_NORMAL); 4997 crs.ccb_h.func_code = XPT_REL_SIMQ; 4998 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4999 crs.openings 5000 = crs.release_timeout 5001 = crs.qfrozen_cnt 5002 = 0; 5003 xpt_action((union ccb *)&crs); 5004 } 5005 5006 /* 5007 * Assume all possible buses are detected by this time, so allow boot 5008 * as soon as they are all scanned. 5009 */ 5010 static void 5011 xpt_boot_delay(void *arg) 5012 { 5013 5014 xpt_release_boot(); 5015 } 5016 5017 /* 5018 * Now that all config hooks have completed, start the boot_delay timer, 5019 * waiting for possibly still undetected buses (USB) to appear. 5020 */ 5021 static void 5022 xpt_ch_done(void *arg) 5023 { 5024 5025 callout_init(&xsoftc.boot_callout, 1); 5026 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 5027 SBT_1MS, xpt_boot_delay, NULL, 0); 5028 } 5029 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL); 5030 5031 /* 5032 * Now that interrupts are enabled, go find our devices 5033 */ 5034 static void 5035 xpt_config(void *arg) 5036 { 5037 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 5038 printf("xpt_config: failed to create taskqueue thread.\n"); 5039 5040 /* Setup debugging path */ 5041 if (cam_dflags != CAM_DEBUG_NONE) { 5042 if (xpt_create_path(&cam_dpath, NULL, 5043 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5044 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5045 printf( 5046 "xpt_config: xpt_create_path() failed for debug target %d:%d:%d, debugging disabled\n", 5047 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5048 cam_dflags = CAM_DEBUG_NONE; 5049 } 5050 } else 5051 cam_dpath = NULL; 5052 5053 periphdriver_init(1); 5054 xpt_hold_boot(); 5055 5056 /* Fire up rescan thread. */ 5057 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5058 "cam", "scanner")) { 5059 printf("xpt_config: failed to create rescan thread.\n"); 5060 } 5061 } 5062 5063 void 5064 xpt_hold_boot_locked(void) 5065 { 5066 5067 if (xsoftc.buses_to_config++ == 0) 5068 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount); 5069 } 5070 5071 void 5072 xpt_hold_boot(void) 5073 { 5074 5075 xpt_lock_buses(); 5076 xpt_hold_boot_locked(); 5077 xpt_unlock_buses(); 5078 } 5079 5080 void 5081 xpt_release_boot(void) 5082 { 5083 5084 xpt_lock_buses(); 5085 if (--xsoftc.buses_to_config == 0) { 5086 if (xsoftc.buses_config_done == 0) { 5087 xsoftc.buses_config_done = 1; 5088 xsoftc.buses_to_config++; 5089 TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task, 5090 NULL); 5091 taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task); 5092 } else 5093 root_mount_rel(&xsoftc.xpt_rootmount); 5094 } 5095 xpt_unlock_buses(); 5096 } 5097 5098 /* 5099 * If the given device only has one peripheral attached to it, and if that 5100 * peripheral is the passthrough driver, announce it. This ensures that the 5101 * user sees some sort of announcement for every peripheral in their system.
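 * xptpassannouncefunc() below implements the per-device check; it is run
 * over every device by xpt_for_all_devices() from xpt_finishconfig_task()
 * when booting without bootverbose.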
5102 */ 5103 static int 5104 xptpassannouncefunc(struct cam_ed *device, void *arg) 5105 { 5106 struct cam_periph *periph; 5107 int i; 5108 5109 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5110 periph = SLIST_NEXT(periph, periph_links), i++); 5111 5112 periph = SLIST_FIRST(&device->periphs); 5113 if ((i == 1) 5114 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5115 xpt_announce_periph(periph, NULL); 5116 5117 return(1); 5118 } 5119 5120 static void 5121 xpt_finishconfig_task(void *context, int pending) 5122 { 5123 5124 periphdriver_init(2); 5125 /* 5126 * Check for devices with no "standard" peripheral driver 5127 * attached. For any devices like that, announce the 5128 * passthrough driver so the user will see something. 5129 */ 5130 if (!bootverbose) 5131 xpt_for_all_devices(xptpassannouncefunc, NULL); 5132 5133 xpt_release_boot(); 5134 } 5135 5136 cam_status 5137 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5138 struct cam_path *path) 5139 { 5140 struct ccb_setasync csa; 5141 cam_status status; 5142 bool xptpath = false; 5143 5144 if (path == NULL) { 5145 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5146 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5147 if (status != CAM_REQ_CMP) 5148 return (status); 5149 xpt_path_lock(path); 5150 xptpath = true; 5151 } 5152 5153 memset(&csa, 0, sizeof(csa)); 5154 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5155 csa.ccb_h.func_code = XPT_SASYNC_CB; 5156 csa.event_enable = event; 5157 csa.callback = cbfunc; 5158 csa.callback_arg = cbarg; 5159 xpt_action((union ccb *)&csa); 5160 status = csa.ccb_h.status; 5161 5162 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5163 ("xpt_register_async: func %p\n", cbfunc)); 5164 5165 if (xptpath) { 5166 xpt_path_unlock(path); 5167 xpt_free_path(path); 5168 } 5169 5170 if ((status == CAM_REQ_CMP) && 5171 (csa.event_enable & AC_FOUND_DEVICE)) { 5172 /* 5173 * Get this peripheral up to date with all 5174 * the currently existing devices. 5175 */ 5176 xpt_for_all_devices(xptsetasyncfunc, &csa); 5177 } 5178 if ((status == CAM_REQ_CMP) && 5179 (csa.event_enable & AC_PATH_REGISTERED)) { 5180 /* 5181 * Get this peripheral up to date with all 5182 * the currently existing buses. 5183 */ 5184 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5185 } 5186 5187 return (status); 5188 } 5189 5190 static void 5191 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5192 { 5193 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5194 5195 switch (work_ccb->ccb_h.func_code) { 5196 /* Common cases first */ 5197 case XPT_PATH_INQ: /* Path routing inquiry */ 5198 { 5199 struct ccb_pathinq *cpi; 5200 5201 cpi = &work_ccb->cpi; 5202 cpi->version_num = 1; /* XXX??? 
*/ 5203 cpi->hba_inquiry = 0; 5204 cpi->target_sprt = 0; 5205 cpi->hba_misc = 0; 5206 cpi->hba_eng_cnt = 0; 5207 cpi->max_target = 0; 5208 cpi->max_lun = 0; 5209 cpi->initiator_id = 0; 5210 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5211 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5212 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5213 cpi->unit_number = sim->unit_number; 5214 cpi->bus_id = sim->bus_id; 5215 cpi->base_transfer_speed = 0; 5216 cpi->protocol = PROTO_UNSPECIFIED; 5217 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5218 cpi->transport = XPORT_UNSPECIFIED; 5219 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5220 cpi->ccb_h.status = CAM_REQ_CMP; 5221 break; 5222 } 5223 default: 5224 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5225 break; 5226 } 5227 xpt_done(work_ccb); 5228 } 5229 5230 /* 5231 * The xpt as a "controller" has no interrupt sources, so polling 5232 * is a no-op. 5233 */ 5234 static void 5235 xptpoll(struct cam_sim *sim) 5236 { 5237 } 5238 5239 void 5240 xpt_lock_buses(void) 5241 { 5242 mtx_lock(&xsoftc.xpt_topo_lock); 5243 } 5244 5245 void 5246 xpt_unlock_buses(void) 5247 { 5248 mtx_unlock(&xsoftc.xpt_topo_lock); 5249 } 5250 5251 struct mtx * 5252 xpt_path_mtx(struct cam_path *path) 5253 { 5254 5255 return (&path->device->device_mtx); 5256 } 5257 5258 static void 5259 xpt_done_process(struct ccb_hdr *ccb_h) 5260 { 5261 struct cam_sim *sim = NULL; 5262 struct cam_devq *devq = NULL; 5263 struct mtx *mtx = NULL; 5264 5265 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5266 struct ccb_scsiio *csio; 5267 5268 if (ccb_h->func_code == XPT_SCSI_IO) { 5269 csio = &((union ccb *)ccb_h)->csio; 5270 if (csio->bio != NULL) 5271 biotrack(csio->bio, __func__); 5272 } 5273 #endif 5274 5275 if (ccb_h->flags & CAM_HIGH_POWER) { 5276 struct highpowerlist *hphead; 5277 struct cam_ed *device; 5278 5279 mtx_lock(&xsoftc.xpt_highpower_lock); 5280 hphead = &xsoftc.highpowerq; 5281 5282 device = STAILQ_FIRST(hphead); 5283 5284 /* 5285 * Increment the count since this command is done. 5286 */ 5287 xsoftc.num_highpower++; 5288 5289 /* 5290 * Any high powered commands queued up? 5291 */ 5292 if (device != NULL) { 5293 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5294 mtx_unlock(&xsoftc.xpt_highpower_lock); 5295 5296 mtx_lock(&device->sim->devq->send_mtx); 5297 xpt_release_devq_device(device, 5298 /*count*/1, /*runqueue*/TRUE); 5299 mtx_unlock(&device->sim->devq->send_mtx); 5300 } else 5301 mtx_unlock(&xsoftc.xpt_highpower_lock); 5302 } 5303 5304 /* 5305 * Insulate against a race where the periph is destroyed but CCBs are 5306 * still not all processed. This shouldn't happen, but it gives us better 5307 * bug diagnostics when it does.
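 * (sim is left NULL when the path has no bus; the KASSERTs below then
 * report the problem instead of a NULL pointer dereference here.)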
5308 */ 5309 if (ccb_h->path->bus) 5310 sim = ccb_h->path->bus->sim; 5311 5312 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5313 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); 5314 xpt_release_simq(sim, /*run_queue*/FALSE); 5315 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5316 } 5317 5318 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5319 && (ccb_h->status & CAM_DEV_QFRZN)) { 5320 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5321 ccb_h->status &= ~CAM_DEV_QFRZN; 5322 } 5323 5324 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5325 struct cam_ed *dev = ccb_h->path->device; 5326 5327 if (sim) 5328 devq = sim->devq; 5329 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.", 5330 ccb_h, xpt_action_name(ccb_h->func_code))); 5331 5332 mtx_lock(&devq->send_mtx); 5333 devq->send_active--; 5334 devq->send_openings++; 5335 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5336 5337 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5338 && (dev->ccbq.dev_active == 0))) { 5339 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5340 xpt_release_devq_device(dev, /*count*/1, 5341 /*run_queue*/FALSE); 5342 } 5343 5344 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5345 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5346 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5347 xpt_release_devq_device(dev, /*count*/1, 5348 /*run_queue*/FALSE); 5349 } 5350 5351 if (!device_is_queued(dev)) 5352 (void)xpt_schedule_devq(devq, dev); 5353 xpt_run_devq(devq); 5354 mtx_unlock(&devq->send_mtx); 5355 5356 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5357 mtx = xpt_path_mtx(ccb_h->path); 5358 mtx_lock(mtx); 5359 5360 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5361 && (--dev->tag_delay_count == 0)) 5362 xpt_start_tags(ccb_h->path); 5363 } 5364 } 5365 5366 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5367 if (mtx == NULL) { 5368 mtx = xpt_path_mtx(ccb_h->path); 5369 mtx_lock(mtx); 5370 } 5371 } else { 5372 if (mtx != NULL) { 5373 mtx_unlock(mtx); 5374 mtx = NULL; 5375 } 5376 } 5377 5378 /* Call the peripheral driver's callback */ 5379 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5380 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5381 if (mtx != NULL) 5382 mtx_unlock(mtx); 5383 } 5384 5385 /* 5386 * Parameterize instead and use xpt_done_td? 
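 * xpt_async_td() below duplicates xpt_done_td() except that it skips the
 * cam_doneq_sleep bookkeeping and does not wrap its work in
 * THREAD_NO_SLEEPING(), so the handlers it runs are allowed to sleep.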
5387 */ 5388 static void 5389 xpt_async_td(void *arg) 5390 { 5391 struct cam_doneq *queue = arg; 5392 struct ccb_hdr *ccb_h; 5393 STAILQ_HEAD(, ccb_hdr) doneq; 5394 5395 STAILQ_INIT(&doneq); 5396 mtx_lock(&queue->cam_doneq_mtx); 5397 while (1) { 5398 while (STAILQ_EMPTY(&queue->cam_doneq)) 5399 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5400 PRIBIO, "-", 0); 5401 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5402 mtx_unlock(&queue->cam_doneq_mtx); 5403 5404 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5405 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5406 xpt_done_process(ccb_h); 5407 } 5408 5409 mtx_lock(&queue->cam_doneq_mtx); 5410 } 5411 } 5412 5413 void 5414 xpt_done_td(void *arg) 5415 { 5416 struct cam_doneq *queue = arg; 5417 struct ccb_hdr *ccb_h; 5418 STAILQ_HEAD(, ccb_hdr) doneq; 5419 5420 STAILQ_INIT(&doneq); 5421 mtx_lock(&queue->cam_doneq_mtx); 5422 while (1) { 5423 while (STAILQ_EMPTY(&queue->cam_doneq)) { 5424 queue->cam_doneq_sleep = 1; 5425 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5426 PRIBIO, "-", 0); 5427 queue->cam_doneq_sleep = 0; 5428 } 5429 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5430 mtx_unlock(&queue->cam_doneq_mtx); 5431 5432 THREAD_NO_SLEEPING(); 5433 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5434 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5435 xpt_done_process(ccb_h); 5436 } 5437 THREAD_SLEEPING_OK(); 5438 5439 mtx_lock(&queue->cam_doneq_mtx); 5440 } 5441 } 5442 5443 static void 5444 camisr_runqueue(void) 5445 { 5446 struct ccb_hdr *ccb_h; 5447 struct cam_doneq *queue; 5448 int i; 5449 5450 /* Process global queues. */ 5451 for (i = 0; i < cam_num_doneqs; i++) { 5452 queue = &cam_doneqs[i]; 5453 mtx_lock(&queue->cam_doneq_mtx); 5454 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5455 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5456 mtx_unlock(&queue->cam_doneq_mtx); 5457 xpt_done_process(ccb_h); 5458 mtx_lock(&queue->cam_doneq_mtx); 5459 } 5460 mtx_unlock(&queue->cam_doneq_mtx); 5461 } 5462 } 5463 5464 /** 5465 * @brief Return the device_t associated with the path 5466 * 5467 * When a SIM is created, it registers a bus with a NEWBUS device_t. This is 5468 * stored in the internal cam_eb bus structure. There is no guarantee any given 5469 * path will have a @c device_t associated with it (it's legal to call @c 5470 * xpt_bus_register with a @c NULL @c device_t). 5471 * 5472 * @param path Path to return the device_t for.
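 * @return The device_t passed to xpt_bus_register() for the path's bus,
 *	   or NULL if the SIM did not supply one.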
5473 */ 5474 device_t 5475 xpt_path_sim_device(const struct cam_path *path) 5476 { 5477 return (path->bus->parent_dev); 5478 } 5479 5480 struct kv 5481 { 5482 uint32_t v; 5483 const char *name; 5484 }; 5485 5486 static struct kv map[] = { 5487 { XPT_NOOP, "XPT_NOOP" }, 5488 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5489 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5490 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5491 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5492 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5493 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5494 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5495 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5496 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5497 { XPT_DEBUG, "XPT_DEBUG" }, 5498 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5499 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5500 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5501 { XPT_ASYNC, "XPT_ASYNC" }, 5502 { XPT_ABORT, "XPT_ABORT" }, 5503 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5504 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5505 { XPT_TERM_IO, "XPT_TERM_IO" }, 5506 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5507 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5508 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5509 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5510 { XPT_ATA_IO, "XPT_ATA_IO" }, 5511 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5512 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5513 { XPT_NVME_IO, "XPT_NVME_IO" }, 5514 { XPT_MMC_IO, "XPT_MMC_IO" }, 5515 { XPT_SMP_IO, "XPT_SMP_IO" }, 5516 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5517 { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, 5518 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5519 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5520 { XPT_EN_LUN, "XPT_EN_LUN" }, 5521 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5522 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5523 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5524 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5525 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5526 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5527 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5528 { 0, 0 } 5529 }; 5530 5531 const char * 5532 xpt_action_name(uint32_t action) 5533 { 5534 static char buffer[32]; /* Only for unknown messages -- racy */ 5535 struct kv *walker = map; 5536 5537 while (walker->name != NULL) { 5538 if (walker->v == action) 5539 return (walker->name); 5540 walker++; 5541 } 5542 5543 snprintf(buffer, sizeof(buffer), "%#x", action); 5544 return (buffer); 5545 } 5546 5547 void 5548 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...) 5549 { 5550 struct sbuf sbuf; 5551 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5552 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 5553 va_list ap; 5554 5555 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5556 xpt_path_sbuf(path, sb); 5557 va_start(ap, fmt); 5558 sbuf_vprintf(sb, fmt, ap); 5559 va_end(ap); 5560 sbuf_finish(sb); 5561 sbuf_delete(sb); 5562 if (cam_debug_delay != 0) 5563 DELAY(cam_debug_delay); 5564 } 5565 5566 void 5567 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...) 5568 { 5569 struct sbuf sbuf; 5570 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5571 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 5572 va_list ap; 5573 5574 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5575 xpt_device_sbuf(dev, sb); 5576 va_start(ap, fmt); 5577 sbuf_vprintf(sb, fmt, ap); 5578 va_end(ap); 5579 sbuf_finish(sb); 5580 sbuf_delete(sb); 5581 if (cam_debug_delay != 0) 5582 DELAY(cam_debug_delay); 5583 } 5584 5585 void 5586 xpt_cam_debug(const char *fmt, ...) 
5587 { 5588 struct sbuf sbuf; 5589 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5590 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 5591 va_list ap; 5592 5593 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5594 sbuf_cat(sb, "cam_debug: "); 5595 va_start(ap, fmt); 5596 sbuf_vprintf(sb, fmt, ap); 5597 va_end(ap); 5598 sbuf_finish(sb); 5599 sbuf_delete(sb); 5600 if (cam_debug_delay != 0) 5601 DELAY(cam_debug_delay); 5602 } 5603