/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>	/* for xpt_print below */

/* Wild guess based on not wanting to grow the stack too much */
#define XPT_PRINT_MAXLEN	512
#ifdef PRINTF_BUFR_SIZE
#define XPT_PRINT_LEN	PRINTF_BUFR_SIZE
#else
#define XPT_PRINT_LEN	128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
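 *
 * (How the limit is enforced, in brief: xsoftc.num_highpower below counts
 * the remaining slots under xpt_highpower_lock, and devices whose high
 * powered CCBs cannot start right away are parked on xsoftc.highpowerq
 * until a slot is released; this macro only sets the initial count.)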
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/*
	 * Registered buses
	 *
	 * N.B., "busses" is an archaic spelling of "buses".  In new code
	 * "buses" is preferred.
	 */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	int			boot_delay;
	struct callout		boot_callout;
	struct task		boot_task;
	struct root_hold_token	xpt_rootmount;

	struct mtx		xpt_topo_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
    &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;
static struct cam_doneq cam_async;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
    &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
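
/*
 * cam_dpath names the single path currently being traced; together with
 * cam_dflags and cam_debug_delay below it steers the CAM_DEBUG*() macros
 * (see cam_debug.h).  These are normally changed at run time with an
 * XPT_DEBUG CCB (one of the CCB types accepted by xptdoioctl() below),
 * e.g. via "camcontrol debug".  This is a summary of usage elsewhere in
 * CAM rather than of code in this file.
 */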
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
    &cam_dflags, 0, "Enabled debug flags");
uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
    &cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void	xpt_async_bcast(struct async_list *async_head,
		    uint32_t async_code,
		    struct cam_path *path,
		    void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static void	 xpt_hold_boot_locked(void);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
		    uint32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static void	 xpt_async_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
			    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
			    u_int num_patterns,
			    struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
			    u_int num_patterns,
			    struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
			    xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
			    struct cam_et *start_target,
xpt_targetfunc_t *tr_func, void *arg); 289 static int xptdevicetraverse(struct cam_et *target, 290 struct cam_ed *start_device, 291 xpt_devicefunc_t *tr_func, void *arg); 292 static int xptperiphtraverse(struct cam_ed *device, 293 struct cam_periph *start_periph, 294 xpt_periphfunc_t *tr_func, void *arg); 295 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 296 xpt_pdrvfunc_t *tr_func, void *arg); 297 static int xptpdperiphtraverse(struct periph_driver **pdrv, 298 struct cam_periph *start_periph, 299 xpt_periphfunc_t *tr_func, 300 void *arg); 301 static xpt_busfunc_t xptdefbusfunc; 302 static xpt_targetfunc_t xptdeftargetfunc; 303 static xpt_devicefunc_t xptdefdevicefunc; 304 static xpt_periphfunc_t xptdefperiphfunc; 305 static void xpt_finishconfig_task(void *context, int pending); 306 static void xpt_dev_async_default(uint32_t async_code, 307 struct cam_eb *bus, 308 struct cam_et *target, 309 struct cam_ed *device, 310 void *async_arg); 311 static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, 312 struct cam_et *target, 313 lun_id_t lun_id); 314 static xpt_devicefunc_t xptsetasyncfunc; 315 static xpt_busfunc_t xptsetasyncbusfunc; 316 static cam_status xptregister(struct cam_periph *periph, 317 void *arg); 318 319 static __inline int 320 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) 321 { 322 int retval; 323 324 mtx_assert(&devq->send_mtx, MA_OWNED); 325 if ((dev->ccbq.queue.entries > 0) && 326 (dev->ccbq.dev_openings > 0) && 327 (dev->ccbq.queue.qfrozen_cnt == 0)) { 328 /* 329 * The priority of a device waiting for controller 330 * resources is that of the highest priority CCB 331 * enqueued. 332 */ 333 retval = 334 xpt_schedule_dev(&devq->send_queue, 335 &dev->devq_entry, 336 CAMQ_GET_PRIO(&dev->ccbq.queue)); 337 } else { 338 retval = 0; 339 } 340 return (retval); 341 } 342 343 static __inline int 344 device_is_queued(struct cam_ed *device) 345 { 346 return (device->devq_entry.index != CAM_UNQUEUED_INDEX); 347 } 348 349 static void 350 xpt_periph_init(void) 351 { 352 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 353 } 354 355 static int 356 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 357 { 358 359 /* 360 * Only allow read-write access. 361 */ 362 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 363 return(EPERM); 364 365 /* 366 * We don't allow nonblocking access. 367 */ 368 if ((flags & O_NONBLOCK) != 0) { 369 printf("%s: can't do nonblocking access\n", devtoname(dev)); 370 return(ENODEV); 371 } 372 373 return(0); 374 } 375 376 static int 377 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 378 { 379 380 return(0); 381 } 382 383 /* 384 * Don't automatically grab the xpt softc lock here even though this is going 385 * through the xpt device. The xpt device is really just a back door for 386 * accessing other devices and SIMs, so the right thing to do is to grab 387 * the appropriate SIM lock once the bus/SIM is located. 
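 *
 * For context: this is the device node that userland tools such as
 * camcontrol(8) talk to.  They open /dev/xpt0 and issue CAMIOCOMMAND or
 * CAMGETPASSTHRU ioctls against it, normally through libcam(3), which also
 * fills in the CCB header fields this rough sketch glosses over:
 *
 *	int fd = open("/dev/xpt0", O_RDWR);	-- must be O_RDWR, see xptopen()
 *	union ccb ccb;
 *
 *	memset(&ccb, 0, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;	-- one of the types accepted below
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	ioctl(fd, CAMIOCOMMAND, &ccb);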
388 */ 389 static int 390 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 391 { 392 int error; 393 394 if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { 395 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); 396 } 397 return (error); 398 } 399 400 static int 401 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 402 { 403 int error; 404 405 error = 0; 406 407 switch(cmd) { 408 /* 409 * For the transport layer CAMIOCOMMAND ioctl, we really only want 410 * to accept CCB types that don't quite make sense to send through a 411 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 412 * in the CAM spec. 413 */ 414 case CAMIOCOMMAND: { 415 union ccb *ccb; 416 union ccb *inccb; 417 struct cam_eb *bus; 418 419 inccb = (union ccb *)addr; 420 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 421 if (inccb->ccb_h.func_code == XPT_SCSI_IO) 422 inccb->csio.bio = NULL; 423 #endif 424 425 if (inccb->ccb_h.flags & CAM_UNLOCKED) 426 return (EINVAL); 427 428 bus = xpt_find_bus(inccb->ccb_h.path_id); 429 if (bus == NULL) 430 return (EINVAL); 431 432 switch (inccb->ccb_h.func_code) { 433 case XPT_SCAN_BUS: 434 case XPT_RESET_BUS: 435 if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || 436 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 437 xpt_release_bus(bus); 438 return (EINVAL); 439 } 440 break; 441 case XPT_SCAN_TGT: 442 if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || 443 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 444 xpt_release_bus(bus); 445 return (EINVAL); 446 } 447 break; 448 default: 449 break; 450 } 451 452 switch(inccb->ccb_h.func_code) { 453 case XPT_SCAN_BUS: 454 case XPT_RESET_BUS: 455 case XPT_PATH_INQ: 456 case XPT_ENG_INQ: 457 case XPT_SCAN_LUN: 458 case XPT_SCAN_TGT: 459 460 ccb = xpt_alloc_ccb(); 461 462 /* 463 * Create a path using the bus, target, and lun the 464 * user passed in. 465 */ 466 if (xpt_create_path(&ccb->ccb_h.path, NULL, 467 inccb->ccb_h.path_id, 468 inccb->ccb_h.target_id, 469 inccb->ccb_h.target_lun) != 470 CAM_REQ_CMP){ 471 error = EINVAL; 472 xpt_free_ccb(ccb); 473 break; 474 } 475 /* Ensure all of our fields are correct */ 476 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 477 inccb->ccb_h.pinfo.priority); 478 xpt_merge_ccb(ccb, inccb); 479 xpt_path_lock(ccb->ccb_h.path); 480 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 481 xpt_path_unlock(ccb->ccb_h.path); 482 bcopy(ccb, inccb, sizeof(union ccb)); 483 xpt_free_path(ccb->ccb_h.path); 484 xpt_free_ccb(ccb); 485 break; 486 487 case XPT_DEBUG: { 488 union ccb ccb; 489 490 /* 491 * This is an immediate CCB, so it's okay to 492 * allocate it on the stack. 493 */ 494 memset(&ccb, 0, sizeof(ccb)); 495 496 /* 497 * Create a path using the bus, target, and lun the 498 * user passed in. 499 */ 500 if (xpt_create_path(&ccb.ccb_h.path, NULL, 501 inccb->ccb_h.path_id, 502 inccb->ccb_h.target_id, 503 inccb->ccb_h.target_lun) != 504 CAM_REQ_CMP){ 505 error = EINVAL; 506 break; 507 } 508 /* Ensure all of our fields are correct */ 509 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 510 inccb->ccb_h.pinfo.priority); 511 xpt_merge_ccb(&ccb, inccb); 512 xpt_action(&ccb); 513 bcopy(&ccb, inccb, sizeof(union ccb)); 514 xpt_free_path(ccb.ccb_h.path); 515 break; 516 } 517 case XPT_DEV_MATCH: { 518 struct cam_periph_map_info mapinfo; 519 struct cam_path *old_path; 520 521 /* 522 * We can't deal with physical addresses for this 523 * type of transaction. 
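 *
 * (Only CAM_DATA_VADDR is supported because cam_periph_mapmem(), called
 * just below, works by wiring and remapping plain user virtual buffers;
 * a physical address or S/G list would leave it nothing to remap.)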
524 */ 525 if ((inccb->ccb_h.flags & CAM_DATA_MASK) != 526 CAM_DATA_VADDR) { 527 error = EINVAL; 528 break; 529 } 530 531 /* 532 * Save this in case the caller had it set to 533 * something in particular. 534 */ 535 old_path = inccb->ccb_h.path; 536 537 /* 538 * We really don't need a path for the matching 539 * code. The path is needed because of the 540 * debugging statements in xpt_action(). They 541 * assume that the CCB has a valid path. 542 */ 543 inccb->ccb_h.path = xpt_periph->path; 544 545 bzero(&mapinfo, sizeof(mapinfo)); 546 547 /* 548 * Map the pattern and match buffers into kernel 549 * virtual address space. 550 */ 551 error = cam_periph_mapmem(inccb, &mapinfo, maxphys); 552 553 if (error) { 554 inccb->ccb_h.path = old_path; 555 break; 556 } 557 558 /* 559 * This is an immediate CCB, we can send it on directly. 560 */ 561 xpt_action(inccb); 562 563 /* 564 * Map the buffers back into user space. 565 */ 566 error = cam_periph_unmapmem(inccb, &mapinfo); 567 568 inccb->ccb_h.path = old_path; 569 break; 570 } 571 default: 572 error = ENOTSUP; 573 break; 574 } 575 xpt_release_bus(bus); 576 break; 577 } 578 /* 579 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 580 * with the periphal driver name and unit name filled in. The other 581 * fields don't really matter as input. The passthrough driver name 582 * ("pass"), and unit number are passed back in the ccb. The current 583 * device generation number, and the index into the device peripheral 584 * driver list, and the status are also passed back. Note that 585 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 586 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 587 * (or rather should be) impossible for the device peripheral driver 588 * list to change since we look at the whole thing in one pass, and 589 * we do it with lock protection. 590 * 591 */ 592 case CAMGETPASSTHRU: { 593 union ccb *ccb; 594 struct cam_periph *periph; 595 struct periph_driver **p_drv; 596 char *name; 597 u_int unit; 598 bool base_periph_found; 599 600 ccb = (union ccb *)addr; 601 unit = ccb->cgdl.unit_number; 602 name = ccb->cgdl.periph_name; 603 base_periph_found = false; 604 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 605 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 606 ccb->csio.bio = NULL; 607 #endif 608 609 /* 610 * Sanity check -- make sure we don't get a null peripheral 611 * driver name. 612 */ 613 if (*ccb->cgdl.periph_name == '\0') { 614 error = EINVAL; 615 break; 616 } 617 618 /* Keep the list from changing while we traverse it */ 619 xpt_lock_buses(); 620 621 /* first find our driver in the list of drivers */ 622 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 623 if (strcmp((*p_drv)->driver_name, name) == 0) 624 break; 625 626 if (*p_drv == NULL) { 627 xpt_unlock_buses(); 628 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 629 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 630 *ccb->cgdl.periph_name = '\0'; 631 ccb->cgdl.unit_number = 0; 632 error = ENOENT; 633 break; 634 } 635 636 /* 637 * Run through every peripheral instance of this driver 638 * and check to see whether it matches the unit passed 639 * in by the user. If it does, get out of the loops and 640 * find the passthrough driver associated with that 641 * peripheral driver. 
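 *
 * For example, a request naming driver "da" and unit 11 first locates the
 * da instance with unit_number 11, then walks the periph list hanging off
 * that instance's device looking for the "pass" periph sharing the same
 * path, and reports its name and unit number (which need not be 11).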
642 */ 643 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 644 periph = TAILQ_NEXT(periph, unit_links)) { 645 if (periph->unit_number == unit) 646 break; 647 } 648 /* 649 * If we found the peripheral driver that the user passed 650 * in, go through all of the peripheral drivers for that 651 * particular device and look for a passthrough driver. 652 */ 653 if (periph != NULL) { 654 struct cam_ed *device; 655 int i; 656 657 base_periph_found = true; 658 device = periph->path->device; 659 for (i = 0, periph = SLIST_FIRST(&device->periphs); 660 periph != NULL; 661 periph = SLIST_NEXT(periph, periph_links), i++) { 662 /* 663 * Check to see whether we have a 664 * passthrough device or not. 665 */ 666 if (strcmp(periph->periph_name, "pass") == 0) { 667 /* 668 * Fill in the getdevlist fields. 669 */ 670 strlcpy(ccb->cgdl.periph_name, 671 periph->periph_name, 672 sizeof(ccb->cgdl.periph_name)); 673 ccb->cgdl.unit_number = 674 periph->unit_number; 675 if (SLIST_NEXT(periph, periph_links)) 676 ccb->cgdl.status = 677 CAM_GDEVLIST_MORE_DEVS; 678 else 679 ccb->cgdl.status = 680 CAM_GDEVLIST_LAST_DEVICE; 681 ccb->cgdl.generation = 682 device->generation; 683 ccb->cgdl.index = i; 684 /* 685 * Fill in some CCB header fields 686 * that the user may want. 687 */ 688 ccb->ccb_h.path_id = 689 periph->path->bus->path_id; 690 ccb->ccb_h.target_id = 691 periph->path->target->target_id; 692 ccb->ccb_h.target_lun = 693 periph->path->device->lun_id; 694 ccb->ccb_h.status = CAM_REQ_CMP; 695 break; 696 } 697 } 698 } 699 700 /* 701 * If the periph is null here, one of two things has 702 * happened. The first possibility is that we couldn't 703 * find the unit number of the particular peripheral driver 704 * that the user is asking about. e.g. the user asks for 705 * the passthrough driver for "da11". We find the list of 706 * "da" peripherals all right, but there is no unit 11. 707 * The other possibility is that we went through the list 708 * of peripheral drivers attached to the device structure, 709 * but didn't find one with the name "pass". Either way, 710 * we return ENOENT, since we couldn't find something. 711 */ 712 if (periph == NULL) { 713 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 714 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 715 *ccb->cgdl.periph_name = '\0'; 716 ccb->cgdl.unit_number = 0; 717 error = ENOENT; 718 /* 719 * It is unfortunate that this is even necessary, 720 * but there are many, many clueless users out there. 721 * If this is true, the user is looking for the 722 * passthrough driver, but doesn't have one in his 723 * kernel. 
724 */ 725 if (base_periph_found) { 726 printf( 727 "xptioctl: pass driver is not in the kernel\n" 728 "xptioctl: put \"device pass\" in your kernel config file\n"); 729 } 730 } 731 xpt_unlock_buses(); 732 break; 733 } 734 default: 735 error = ENOTTY; 736 break; 737 } 738 739 return(error); 740 } 741 742 static int 743 cam_module_event_handler(module_t mod, int what, void *arg) 744 { 745 int error; 746 747 switch (what) { 748 case MOD_LOAD: 749 if ((error = xpt_init(NULL)) != 0) 750 return (error); 751 break; 752 case MOD_UNLOAD: 753 return EBUSY; 754 default: 755 return EOPNOTSUPP; 756 } 757 758 return 0; 759 } 760 761 static struct xpt_proto * 762 xpt_proto_find(cam_proto proto) 763 { 764 struct xpt_proto **pp; 765 766 SET_FOREACH(pp, cam_xpt_proto_set) { 767 if ((*pp)->proto == proto) 768 return *pp; 769 } 770 771 return NULL; 772 } 773 774 static void 775 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 776 { 777 778 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 779 xpt_free_path(done_ccb->ccb_h.path); 780 xpt_free_ccb(done_ccb); 781 } else { 782 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 783 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 784 } 785 xpt_release_boot(); 786 } 787 788 /* thread to handle bus rescans */ 789 static void 790 xpt_scanner_thread(void *dummy) 791 { 792 union ccb *ccb; 793 struct mtx *mtx; 794 struct cam_ed *device; 795 796 xpt_lock_buses(); 797 for (;;) { 798 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 799 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 800 "-", 0); 801 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 802 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 803 xpt_unlock_buses(); 804 805 /* 806 * We need to lock the device's mutex which we use as 807 * the path mutex. We can't do it directly because the 808 * cam_path in the ccb may wind up going away because 809 * the path lock may be dropped and the path retired in 810 * the completion callback. We do this directly to keep 811 * the reference counts in cam_path sane. We also have 812 * to copy the device pointer because ccb_h.path may 813 * be freed in the callback. 814 */ 815 mtx = xpt_path_mtx(ccb->ccb_h.path); 816 device = ccb->ccb_h.path->device; 817 xpt_acquire_device(device); 818 mtx_lock(mtx); 819 xpt_action(ccb); 820 mtx_unlock(mtx); 821 xpt_release_device(device); 822 823 xpt_lock_buses(); 824 } 825 } 826 } 827 828 void 829 xpt_rescan(union ccb *ccb) 830 { 831 struct ccb_hdr *hdr; 832 833 /* Prepare request */ 834 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 835 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 836 ccb->ccb_h.func_code = XPT_SCAN_BUS; 837 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 838 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 839 ccb->ccb_h.func_code = XPT_SCAN_TGT; 840 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 841 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 842 ccb->ccb_h.func_code = XPT_SCAN_LUN; 843 else { 844 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 845 xpt_free_path(ccb->ccb_h.path); 846 xpt_free_ccb(ccb); 847 return; 848 } 849 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 850 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 851 xpt_action_name(ccb->ccb_h.func_code))); 852 853 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 854 ccb->ccb_h.cbfcnp = xpt_rescan_done; 855 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 856 /* Don't make duplicate entries for the same paths. 
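 * Only "fire and forget" rescans -- those whose original completion
 * callback was NULL, i.e. ppriv_ptr1 == NULL after the swap above -- are
 * de-duplicated; a request that carries a callback is always queued so
 * that its owner is eventually notified.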
*/ 857 xpt_lock_buses(); 858 if (ccb->ccb_h.ppriv_ptr1 == NULL) { 859 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { 860 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { 861 wakeup(&xsoftc.ccb_scanq); 862 xpt_unlock_buses(); 863 xpt_print(ccb->ccb_h.path, "rescan already queued\n"); 864 xpt_free_path(ccb->ccb_h.path); 865 xpt_free_ccb(ccb); 866 return; 867 } 868 } 869 } 870 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 871 xpt_hold_boot_locked(); 872 wakeup(&xsoftc.ccb_scanq); 873 xpt_unlock_buses(); 874 } 875 876 /* Functions accessed by the peripheral drivers */ 877 static int 878 xpt_init(void *dummy) 879 { 880 struct cam_sim *xpt_sim; 881 struct cam_path *path; 882 struct cam_devq *devq; 883 cam_status status; 884 int error, i; 885 886 TAILQ_INIT(&xsoftc.xpt_busses); 887 TAILQ_INIT(&xsoftc.ccb_scanq); 888 STAILQ_INIT(&xsoftc.highpowerq); 889 xsoftc.num_highpower = CAM_MAX_HIGHPOWER; 890 891 mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); 892 xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, 893 taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); 894 895 #ifdef CAM_BOOT_DELAY 896 /* 897 * Override this value at compile time to assist our users 898 * who don't use loader to boot a kernel. 899 */ 900 xsoftc.boot_delay = CAM_BOOT_DELAY; 901 #endif 902 903 /* 904 * The xpt layer is, itself, the equivalent of a SIM. 905 * Allow 16 ccbs in the ccb pool for it. This should 906 * give decent parallelism when we probe buses and 907 * perform other XPT functions. 908 */ 909 devq = cam_simq_alloc(16); 910 if (devq == NULL) 911 return (ENOMEM); 912 xpt_sim = cam_sim_alloc(xptaction, 913 xptpoll, 914 "xpt", 915 /*softc*/NULL, 916 /*unit*/0, 917 /*mtx*/NULL, 918 /*max_dev_transactions*/0, 919 /*max_tagged_dev_transactions*/0, 920 devq); 921 if (xpt_sim == NULL) 922 return (ENOMEM); 923 924 if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { 925 printf( 926 "xpt_init: xpt_bus_register failed with errno %d, failing attach\n", 927 error); 928 return (EINVAL); 929 } 930 931 /* 932 * Looking at the XPT from the SIM layer, the XPT is 933 * the equivalent of a peripheral driver. Allocate 934 * a peripheral driver entry for us. 
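 *
 * (Concretely: build a wildcard path on CAM_XPT_PATH_ID and hand it to
 * cam_periph_alloc() with xptregister() as the register callback, which
 * stores the resulting periph in the global xpt_periph.)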
935 */ 936 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 937 CAM_TARGET_WILDCARD, 938 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 939 printf( 940 "xpt_init: xpt_create_path failed with status %#x, failing attach\n", 941 status); 942 return (EINVAL); 943 } 944 xpt_path_lock(path); 945 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 946 path, NULL, 0, xpt_sim); 947 xpt_path_unlock(path); 948 xpt_free_path(path); 949 950 if (cam_num_doneqs < 1) 951 cam_num_doneqs = 1 + mp_ncpus / 6; 952 else if (cam_num_doneqs > MAXCPU) 953 cam_num_doneqs = MAXCPU; 954 for (i = 0; i < cam_num_doneqs; i++) { 955 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, 956 MTX_DEF); 957 STAILQ_INIT(&cam_doneqs[i].cam_doneq); 958 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], 959 &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); 960 if (error != 0) { 961 cam_num_doneqs = i; 962 break; 963 } 964 } 965 if (cam_num_doneqs < 1) { 966 printf("xpt_init: Cannot init completion queues - failing attach\n"); 967 return (ENOMEM); 968 } 969 970 mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF); 971 STAILQ_INIT(&cam_async.cam_doneq); 972 if (kproc_kthread_add(xpt_async_td, &cam_async, 973 &cam_proc, NULL, 0, 0, "cam", "async") != 0) { 974 printf("xpt_init: Cannot init async thread - failing attach\n"); 975 return (ENOMEM); 976 } 977 978 /* 979 * Register a callback for when interrupts are enabled. 980 */ 981 config_intrhook_oneshot(xpt_config, NULL); 982 983 return (0); 984 } 985 986 static cam_status 987 xptregister(struct cam_periph *periph, void *arg) 988 { 989 struct cam_sim *xpt_sim; 990 991 if (periph == NULL) { 992 printf("xptregister: periph was NULL!!\n"); 993 return(CAM_REQ_CMP_ERR); 994 } 995 996 xpt_sim = (struct cam_sim *)arg; 997 xpt_sim->softc = periph; 998 xpt_periph = periph; 999 periph->softc = NULL; 1000 1001 return(CAM_REQ_CMP); 1002 } 1003 1004 int32_t 1005 xpt_add_periph(struct cam_periph *periph) 1006 { 1007 struct cam_ed *device; 1008 int32_t status; 1009 1010 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 1011 device = periph->path->device; 1012 status = CAM_REQ_CMP; 1013 if (device != NULL) { 1014 mtx_lock(&device->target->bus->eb_mtx); 1015 device->generation++; 1016 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 1017 mtx_unlock(&device->target->bus->eb_mtx); 1018 atomic_add_32(&xsoftc.xpt_generation, 1); 1019 } 1020 1021 return (status); 1022 } 1023 1024 void 1025 xpt_remove_periph(struct cam_periph *periph) 1026 { 1027 struct cam_ed *device; 1028 1029 device = periph->path->device; 1030 if (device != NULL) { 1031 mtx_lock(&device->target->bus->eb_mtx); 1032 device->generation++; 1033 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1034 mtx_unlock(&device->target->bus->eb_mtx); 1035 atomic_add_32(&xsoftc.xpt_generation, 1); 1036 } 1037 } 1038 1039 void 1040 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1041 { 1042 char buf[128]; 1043 struct sbuf sb; 1044 1045 (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 1046 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 1047 xpt_announce_periph_sbuf(periph, &sb, announce_string); 1048 (void)sbuf_finish(&sb); 1049 } 1050 1051 void 1052 xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb, 1053 char *announce_string) 1054 { 1055 struct cam_path *path = periph->path; 1056 struct xpt_proto *proto; 1057 1058 cam_periph_assert(periph, MA_OWNED); 1059 periph->flags |= CAM_PERIPH_ANNOUNCED; 1060 1061 
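	/*
	 * The announcement below starts with a line of the form, for
	 * example (illustrative values, not from a real probe):
	 *
	 *	da0 at mpt0 bus 0 scbus2 target 1 lun 0
	 *
	 * followed by protocol, transport, command queueing and serial
	 * number details, each line prefixed with "<periph><unit>: ".
	 */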
sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1062 periph->periph_name, periph->unit_number, 1063 path->bus->sim->sim_name, 1064 path->bus->sim->unit_number, 1065 path->bus->sim->bus_id, 1066 path->bus->path_id, 1067 path->target->target_id, 1068 (uintmax_t)path->device->lun_id); 1069 sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); 1070 proto = xpt_proto_find(path->device->protocol); 1071 if (proto) 1072 proto->ops->announce_sbuf(path->device, sb); 1073 else 1074 sbuf_printf(sb, "Unknown protocol device %d\n", 1075 path->device->protocol); 1076 if (path->device->serial_num_len > 0) { 1077 /* Don't wrap the screen - print only the first 60 chars */ 1078 sbuf_printf(sb, "%s%d: Serial Number %.60s\n", 1079 periph->periph_name, periph->unit_number, 1080 path->device->serial_num); 1081 } 1082 /* Announce transport details. */ 1083 path->bus->xport->ops->announce_sbuf(periph, sb); 1084 /* Announce command queueing. */ 1085 if (path->device->inq_flags & SID_CmdQue 1086 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1087 sbuf_printf(sb, "%s%d: Command Queueing enabled\n", 1088 periph->periph_name, periph->unit_number); 1089 } 1090 /* Announce caller's details if they've passed in. */ 1091 if (announce_string != NULL) 1092 sbuf_printf(sb, "%s%d: %s\n", periph->periph_name, 1093 periph->unit_number, announce_string); 1094 } 1095 1096 void 1097 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) 1098 { 1099 if (quirks != 0) { 1100 printf("%s%d: quirks=0x%b\n", periph->periph_name, 1101 periph->unit_number, quirks, bit_string); 1102 } 1103 } 1104 1105 void 1106 xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb, 1107 int quirks, char *bit_string) 1108 { 1109 if (quirks != 0) { 1110 sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name, 1111 periph->unit_number, quirks, bit_string); 1112 } 1113 } 1114 1115 void 1116 xpt_denounce_periph(struct cam_periph *periph) 1117 { 1118 char buf[128]; 1119 struct sbuf sb; 1120 1121 (void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL); 1122 sbuf_set_drain(&sb, sbuf_printf_drain, NULL); 1123 xpt_denounce_periph_sbuf(periph, &sb); 1124 (void)sbuf_finish(&sb); 1125 } 1126 1127 void 1128 xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb) 1129 { 1130 struct cam_path *path = periph->path; 1131 struct xpt_proto *proto; 1132 1133 cam_periph_assert(periph, MA_OWNED); 1134 1135 sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1136 periph->periph_name, periph->unit_number, 1137 path->bus->sim->sim_name, 1138 path->bus->sim->unit_number, 1139 path->bus->sim->bus_id, 1140 path->bus->path_id, 1141 path->target->target_id, 1142 (uintmax_t)path->device->lun_id); 1143 sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number); 1144 proto = xpt_proto_find(path->device->protocol); 1145 if (proto) 1146 proto->ops->denounce_sbuf(path->device, sb); 1147 else 1148 sbuf_printf(sb, "Unknown protocol device %d", 1149 path->device->protocol); 1150 if (path->device->serial_num_len > 0) 1151 sbuf_printf(sb, " s/n %.60s", path->device->serial_num); 1152 sbuf_cat(sb, " detached\n"); 1153 } 1154 1155 int 1156 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) 1157 { 1158 int ret = -1, l, o; 1159 struct ccb_dev_advinfo cdai; 1160 struct scsi_vpd_device_id *did; 1161 struct scsi_vpd_id_descriptor *idd; 1162 1163 xpt_path_assert(path, MA_OWNED); 1164 1165 memset(&cdai, 0, sizeof(cdai)); 1166 xpt_setup_ccb(&cdai.ccb_h, 
path, CAM_PRIORITY_NORMAL); 1167 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 1168 cdai.flags = CDAI_FLAG_NONE; 1169 cdai.bufsiz = len; 1170 cdai.buf = buf; 1171 1172 if (!strcmp(attr, "GEOM::ident")) 1173 cdai.buftype = CDAI_TYPE_SERIAL_NUM; 1174 else if (!strcmp(attr, "GEOM::physpath")) 1175 cdai.buftype = CDAI_TYPE_PHYS_PATH; 1176 else if (strcmp(attr, "GEOM::lunid") == 0 || 1177 strcmp(attr, "GEOM::lunname") == 0) { 1178 cdai.buftype = CDAI_TYPE_SCSI_DEVID; 1179 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; 1180 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT); 1181 if (cdai.buf == NULL) { 1182 ret = ENOMEM; 1183 goto out; 1184 } 1185 } else 1186 goto out; 1187 1188 xpt_action((union ccb *)&cdai); /* can only be synchronous */ 1189 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 1190 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 1191 if (cdai.provsiz == 0) 1192 goto out; 1193 switch(cdai.buftype) { 1194 case CDAI_TYPE_SCSI_DEVID: 1195 did = (struct scsi_vpd_device_id *)cdai.buf; 1196 if (strcmp(attr, "GEOM::lunid") == 0) { 1197 idd = scsi_get_devid(did, cdai.provsiz, 1198 scsi_devid_is_lun_naa); 1199 if (idd == NULL) 1200 idd = scsi_get_devid(did, cdai.provsiz, 1201 scsi_devid_is_lun_eui64); 1202 if (idd == NULL) 1203 idd = scsi_get_devid(did, cdai.provsiz, 1204 scsi_devid_is_lun_uuid); 1205 if (idd == NULL) 1206 idd = scsi_get_devid(did, cdai.provsiz, 1207 scsi_devid_is_lun_md5); 1208 } else 1209 idd = NULL; 1210 1211 if (idd == NULL) 1212 idd = scsi_get_devid(did, cdai.provsiz, 1213 scsi_devid_is_lun_t10); 1214 if (idd == NULL) 1215 idd = scsi_get_devid(did, cdai.provsiz, 1216 scsi_devid_is_lun_name); 1217 if (idd == NULL) 1218 break; 1219 1220 ret = 0; 1221 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == 1222 SVPD_ID_CODESET_ASCII) { 1223 if (idd->length < len) { 1224 for (l = 0; l < idd->length; l++) 1225 buf[l] = idd->identifier[l] ? 1226 idd->identifier[l] : ' '; 1227 buf[l] = 0; 1228 } else 1229 ret = EFAULT; 1230 break; 1231 } 1232 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == 1233 SVPD_ID_CODESET_UTF8) { 1234 l = strnlen(idd->identifier, idd->length); 1235 if (l < len) { 1236 bcopy(idd->identifier, buf, l); 1237 buf[l] = 0; 1238 } else 1239 ret = EFAULT; 1240 break; 1241 } 1242 if ((idd->id_type & SVPD_ID_TYPE_MASK) == 1243 SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) { 1244 if ((idd->length - 2) * 2 + 4 >= len) { 1245 ret = EFAULT; 1246 break; 1247 } 1248 for (l = 2, o = 0; l < idd->length; l++) { 1249 if (l == 6 || l == 8 || l == 10 || l == 12) 1250 o += sprintf(buf + o, "-"); 1251 o += sprintf(buf + o, "%02x", 1252 idd->identifier[l]); 1253 } 1254 break; 1255 } 1256 if (idd->length * 2 < len) { 1257 for (l = 0; l < idd->length; l++) 1258 sprintf(buf + l * 2, "%02x", 1259 idd->identifier[l]); 1260 } else 1261 ret = EFAULT; 1262 break; 1263 default: 1264 if (cdai.provsiz < len) { 1265 cdai.buf[cdai.provsiz] = 0; 1266 ret = 0; 1267 } else 1268 ret = EFAULT; 1269 break; 1270 } 1271 1272 out: 1273 if ((char *)cdai.buf != buf) 1274 free(cdai.buf, M_CAMXPT); 1275 return ret; 1276 } 1277 1278 static dev_match_ret 1279 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1280 struct cam_eb *bus) 1281 { 1282 dev_match_ret retval; 1283 u_int i; 1284 1285 retval = DM_RET_NONE; 1286 1287 /* 1288 * If we aren't given something to match against, that's an error. 1289 */ 1290 if (bus == NULL) 1291 return(DM_RET_ERROR); 1292 1293 /* 1294 * If there are no match entries, then this bus matches no 1295 * matter what. 
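 *
 * (Recall the dev_match_ret encoding defined near the top of this file:
 * the low nibble carries flags such as DM_RET_COPY, the high nibble the
 * action, so DM_RET_DESCEND | DM_RET_COPY means "copy this bus out and
 * keep walking down the EDT".)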
1296 */ 1297 if ((patterns == NULL) || (num_patterns == 0)) 1298 return(DM_RET_DESCEND | DM_RET_COPY); 1299 1300 for (i = 0; i < num_patterns; i++) { 1301 struct bus_match_pattern *cur_pattern; 1302 struct device_match_pattern *dp = &patterns[i].pattern.device_pattern; 1303 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern; 1304 1305 /* 1306 * If the pattern in question isn't for a bus node, we 1307 * aren't interested. However, we do indicate to the 1308 * calling routine that we should continue descending the 1309 * tree, since the user wants to match against lower-level 1310 * EDT elements. 1311 */ 1312 if (patterns[i].type == DEV_MATCH_DEVICE && 1313 (dp->flags & DEV_MATCH_PATH) != 0 && 1314 dp->path_id != bus->path_id) 1315 continue; 1316 if (patterns[i].type == DEV_MATCH_PERIPH && 1317 (pp->flags & PERIPH_MATCH_PATH) != 0 && 1318 pp->path_id != bus->path_id) 1319 continue; 1320 if (patterns[i].type != DEV_MATCH_BUS) { 1321 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1322 retval |= DM_RET_DESCEND; 1323 continue; 1324 } 1325 1326 cur_pattern = &patterns[i].pattern.bus_pattern; 1327 1328 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1329 && (cur_pattern->path_id != bus->path_id)) 1330 continue; 1331 1332 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1333 && (cur_pattern->bus_id != bus->sim->bus_id)) 1334 continue; 1335 1336 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1337 && (cur_pattern->unit_number != bus->sim->unit_number)) 1338 continue; 1339 1340 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1341 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1342 DEV_IDLEN) != 0)) 1343 continue; 1344 1345 /* 1346 * If we get to this point, the user definitely wants 1347 * information on this bus. So tell the caller to copy the 1348 * data out. 1349 */ 1350 retval |= DM_RET_COPY; 1351 1352 /* 1353 * If the return action has been set to descend, then we 1354 * know that we've already seen a non-bus matching 1355 * expression, therefore we need to further descend the tree. 1356 * This won't change by continuing around the loop, so we 1357 * go ahead and return. If we haven't seen a non-bus 1358 * matching expression, we keep going around the loop until 1359 * we exhaust the matching expressions. We'll set the stop 1360 * flag once we fall out of the loop. 1361 */ 1362 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1363 return(retval); 1364 } 1365 1366 /* 1367 * If the return action hasn't been set to descend yet, that means 1368 * we haven't seen anything other than bus matching patterns. So 1369 * tell the caller to stop descending the tree -- the user doesn't 1370 * want to match against lower level tree elements. 1371 */ 1372 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1373 retval |= DM_RET_STOP; 1374 1375 return(retval); 1376 } 1377 1378 static dev_match_ret 1379 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1380 struct cam_ed *device) 1381 { 1382 dev_match_ret retval; 1383 u_int i; 1384 1385 retval = DM_RET_NONE; 1386 1387 /* 1388 * If we aren't given something to match against, that's an error. 1389 */ 1390 if (device == NULL) 1391 return(DM_RET_ERROR); 1392 1393 /* 1394 * If there are no match entries, then this device matches no 1395 * matter what. 
1396 */ 1397 if ((patterns == NULL) || (num_patterns == 0)) 1398 return(DM_RET_DESCEND | DM_RET_COPY); 1399 1400 for (i = 0; i < num_patterns; i++) { 1401 struct device_match_pattern *cur_pattern; 1402 struct scsi_vpd_device_id *device_id_page; 1403 struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern; 1404 1405 /* 1406 * If the pattern in question isn't for a device node, we 1407 * aren't interested. 1408 */ 1409 if (patterns[i].type == DEV_MATCH_PERIPH && 1410 (pp->flags & PERIPH_MATCH_TARGET) != 0 && 1411 pp->target_id != device->target->target_id) 1412 continue; 1413 if (patterns[i].type == DEV_MATCH_PERIPH && 1414 (pp->flags & PERIPH_MATCH_LUN) != 0 && 1415 pp->target_lun != device->lun_id) 1416 continue; 1417 if (patterns[i].type != DEV_MATCH_DEVICE) { 1418 if ((patterns[i].type == DEV_MATCH_PERIPH) 1419 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1420 retval |= DM_RET_DESCEND; 1421 continue; 1422 } 1423 1424 cur_pattern = &patterns[i].pattern.device_pattern; 1425 1426 /* Error out if mutually exclusive options are specified. */ 1427 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1428 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1429 return(DM_RET_ERROR); 1430 1431 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1432 && (cur_pattern->path_id != device->target->bus->path_id)) 1433 continue; 1434 1435 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1436 && (cur_pattern->target_id != device->target->target_id)) 1437 continue; 1438 1439 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1440 && (cur_pattern->target_lun != device->lun_id)) 1441 continue; 1442 1443 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1444 && (cam_quirkmatch((caddr_t)&device->inq_data, 1445 (caddr_t)&cur_pattern->data.inq_pat, 1446 1, sizeof(cur_pattern->data.inq_pat), 1447 scsi_static_inquiry_match) == NULL)) 1448 continue; 1449 1450 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1451 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1452 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1453 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1454 device->device_id_len 1455 - SVPD_DEVICE_ID_HDR_LEN, 1456 cur_pattern->data.devid_pat.id, 1457 cur_pattern->data.devid_pat.id_len) != 0)) 1458 continue; 1459 1460 /* 1461 * If we get to this point, the user definitely wants 1462 * information on this device. So tell the caller to copy 1463 * the data out. 1464 */ 1465 retval |= DM_RET_COPY; 1466 1467 /* 1468 * If the return action has been set to descend, then we 1469 * know that we've already seen a peripheral matching 1470 * expression, therefore we need to further descend the tree. 1471 * This won't change by continuing around the loop, so we 1472 * go ahead and return. If we haven't seen a peripheral 1473 * matching expression, we keep going around the loop until 1474 * we exhaust the matching expressions. We'll set the stop 1475 * flag once we fall out of the loop. 1476 */ 1477 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1478 return(retval); 1479 } 1480 1481 /* 1482 * If the return action hasn't been set to descend yet, that means 1483 * we haven't seen any peripheral matching patterns. So tell the 1484 * caller to stop descending the tree -- the user doesn't want to 1485 * match against lower level tree elements. 1486 */ 1487 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1488 retval |= DM_RET_STOP; 1489 1490 return(retval); 1491 } 1492 1493 /* 1494 * Match a single peripheral against any number of match patterns. 
1495 */ 1496 static dev_match_ret 1497 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1498 struct cam_periph *periph) 1499 { 1500 dev_match_ret retval; 1501 u_int i; 1502 1503 /* 1504 * If we aren't given something to match against, that's an error. 1505 */ 1506 if (periph == NULL) 1507 return(DM_RET_ERROR); 1508 1509 /* 1510 * If there are no match entries, then this peripheral matches no 1511 * matter what. 1512 */ 1513 if ((patterns == NULL) || (num_patterns == 0)) 1514 return(DM_RET_STOP | DM_RET_COPY); 1515 1516 /* 1517 * There aren't any nodes below a peripheral node, so there's no 1518 * reason to descend the tree any further. 1519 */ 1520 retval = DM_RET_STOP; 1521 1522 for (i = 0; i < num_patterns; i++) { 1523 struct periph_match_pattern *cur_pattern; 1524 1525 /* 1526 * If the pattern in question isn't for a peripheral, we 1527 * aren't interested. 1528 */ 1529 if (patterns[i].type != DEV_MATCH_PERIPH) 1530 continue; 1531 1532 cur_pattern = &patterns[i].pattern.periph_pattern; 1533 1534 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1535 && (cur_pattern->path_id != periph->path->bus->path_id)) 1536 continue; 1537 1538 /* 1539 * For the target and lun id's, we have to make sure the 1540 * target and lun pointers aren't NULL. The xpt peripheral 1541 * has a wildcard target and device. 1542 */ 1543 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1544 && ((periph->path->target == NULL) 1545 ||(cur_pattern->target_id != periph->path->target->target_id))) 1546 continue; 1547 1548 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1549 && ((periph->path->device == NULL) 1550 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1551 continue; 1552 1553 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1554 && (cur_pattern->unit_number != periph->unit_number)) 1555 continue; 1556 1557 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1558 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1559 DEV_IDLEN) != 0)) 1560 continue; 1561 1562 /* 1563 * If we get to this point, the user definitely wants 1564 * information on this peripheral. So tell the caller to 1565 * copy the data out. 1566 */ 1567 retval |= DM_RET_COPY; 1568 1569 /* 1570 * The return action has already been set to stop, since 1571 * peripherals don't have any nodes below them in the EDT. 1572 */ 1573 return(retval); 1574 } 1575 1576 /* 1577 * If we get to this point, the peripheral that was passed in 1578 * doesn't match any of the patterns. 1579 */ 1580 return(retval); 1581 } 1582 1583 static int 1584 xptedtbusfunc(struct cam_eb *bus, void *arg) 1585 { 1586 struct ccb_dev_match *cdm; 1587 struct cam_et *target; 1588 dev_match_ret retval; 1589 1590 cdm = (struct ccb_dev_match *)arg; 1591 1592 /* 1593 * If our position is for something deeper in the tree, that means 1594 * that we've already seen this node. So, we keep going down. 1595 */ 1596 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1597 && (cdm->pos.cookie.bus == bus) 1598 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1599 && (cdm->pos.cookie.target != NULL)) 1600 retval = DM_RET_DESCEND; 1601 else 1602 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1603 1604 /* 1605 * If we got an error, bail out of the search. 1606 */ 1607 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1608 cdm->status = CAM_DEV_MATCH_ERROR; 1609 return(0); 1610 } 1611 1612 /* 1613 * If the copy flag is set, copy this bus out. 
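 *
 * If the result buffer fills up below, we record our position (the bus
 * cookie plus the current bus list generation) in cdm->pos and flag
 * CAM_DEV_MATCH_MORE; the caller is expected to resubmit the same CCB,
 * and the traversal resumes from that saved position.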
1614 */ 1615 if (retval & DM_RET_COPY) { 1616 int spaceleft, j; 1617 1618 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1619 sizeof(struct dev_match_result)); 1620 1621 /* 1622 * If we don't have enough space to put in another 1623 * match result, save our position and tell the 1624 * user there are more devices to check. 1625 */ 1626 if (spaceleft < sizeof(struct dev_match_result)) { 1627 bzero(&cdm->pos, sizeof(cdm->pos)); 1628 cdm->pos.position_type = 1629 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1630 1631 cdm->pos.cookie.bus = bus; 1632 cdm->pos.generations[CAM_BUS_GENERATION]= 1633 xsoftc.bus_generation; 1634 cdm->status = CAM_DEV_MATCH_MORE; 1635 return(0); 1636 } 1637 j = cdm->num_matches; 1638 cdm->num_matches++; 1639 cdm->matches[j].type = DEV_MATCH_BUS; 1640 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1641 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1642 cdm->matches[j].result.bus_result.unit_number = 1643 bus->sim->unit_number; 1644 strlcpy(cdm->matches[j].result.bus_result.dev_name, 1645 bus->sim->sim_name, 1646 sizeof(cdm->matches[j].result.bus_result.dev_name)); 1647 } 1648 1649 /* 1650 * If the user is only interested in buses, there's no 1651 * reason to descend to the next level in the tree. 1652 */ 1653 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1654 return(1); 1655 1656 /* 1657 * If there is a target generation recorded, check it to 1658 * make sure the target list hasn't changed. 1659 */ 1660 mtx_lock(&bus->eb_mtx); 1661 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1662 && (cdm->pos.cookie.bus == bus) 1663 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1664 && (cdm->pos.cookie.target != NULL)) { 1665 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1666 bus->generation)) { 1667 mtx_unlock(&bus->eb_mtx); 1668 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1669 return (0); 1670 } 1671 target = (struct cam_et *)cdm->pos.cookie.target; 1672 target->refcount++; 1673 } else 1674 target = NULL; 1675 mtx_unlock(&bus->eb_mtx); 1676 1677 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1678 } 1679 1680 static int 1681 xptedttargetfunc(struct cam_et *target, void *arg) 1682 { 1683 struct ccb_dev_match *cdm; 1684 struct cam_eb *bus; 1685 struct cam_ed *device; 1686 1687 cdm = (struct ccb_dev_match *)arg; 1688 bus = target->bus; 1689 1690 /* 1691 * If there is a device list generation recorded, check it to 1692 * make sure the device list hasn't changed. 
1693 */ 1694 mtx_lock(&bus->eb_mtx); 1695 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1696 && (cdm->pos.cookie.bus == bus) 1697 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1698 && (cdm->pos.cookie.target == target) 1699 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1700 && (cdm->pos.cookie.device != NULL)) { 1701 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1702 target->generation) { 1703 mtx_unlock(&bus->eb_mtx); 1704 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1705 return(0); 1706 } 1707 device = (struct cam_ed *)cdm->pos.cookie.device; 1708 device->refcount++; 1709 } else 1710 device = NULL; 1711 mtx_unlock(&bus->eb_mtx); 1712 1713 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1714 } 1715 1716 static int 1717 xptedtdevicefunc(struct cam_ed *device, void *arg) 1718 { 1719 struct cam_eb *bus; 1720 struct cam_periph *periph; 1721 struct ccb_dev_match *cdm; 1722 dev_match_ret retval; 1723 1724 cdm = (struct ccb_dev_match *)arg; 1725 bus = device->target->bus; 1726 1727 /* 1728 * If our position is for something deeper in the tree, that means 1729 * that we've already seen this node. So, we keep going down. 1730 */ 1731 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1732 && (cdm->pos.cookie.device == device) 1733 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1734 && (cdm->pos.cookie.periph != NULL)) 1735 retval = DM_RET_DESCEND; 1736 else 1737 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1738 device); 1739 1740 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1741 cdm->status = CAM_DEV_MATCH_ERROR; 1742 return(0); 1743 } 1744 1745 /* 1746 * If the copy flag is set, copy this device out. 1747 */ 1748 if (retval & DM_RET_COPY) { 1749 int spaceleft, j; 1750 1751 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1752 sizeof(struct dev_match_result)); 1753 1754 /* 1755 * If we don't have enough space to put in another 1756 * match result, save our position and tell the 1757 * user there are more devices to check. 
1758 */ 1759 if (spaceleft < sizeof(struct dev_match_result)) { 1760 bzero(&cdm->pos, sizeof(cdm->pos)); 1761 cdm->pos.position_type = 1762 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1763 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1764 1765 cdm->pos.cookie.bus = device->target->bus; 1766 cdm->pos.generations[CAM_BUS_GENERATION]= 1767 xsoftc.bus_generation; 1768 cdm->pos.cookie.target = device->target; 1769 cdm->pos.generations[CAM_TARGET_GENERATION] = 1770 device->target->bus->generation; 1771 cdm->pos.cookie.device = device; 1772 cdm->pos.generations[CAM_DEV_GENERATION] = 1773 device->target->generation; 1774 cdm->status = CAM_DEV_MATCH_MORE; 1775 return(0); 1776 } 1777 j = cdm->num_matches; 1778 cdm->num_matches++; 1779 cdm->matches[j].type = DEV_MATCH_DEVICE; 1780 cdm->matches[j].result.device_result.path_id = 1781 device->target->bus->path_id; 1782 cdm->matches[j].result.device_result.target_id = 1783 device->target->target_id; 1784 cdm->matches[j].result.device_result.target_lun = 1785 device->lun_id; 1786 cdm->matches[j].result.device_result.protocol = 1787 device->protocol; 1788 bcopy(&device->inq_data, 1789 &cdm->matches[j].result.device_result.inq_data, 1790 sizeof(struct scsi_inquiry_data)); 1791 bcopy(&device->ident_data, 1792 &cdm->matches[j].result.device_result.ident_data, 1793 sizeof(struct ata_params)); 1794 1795 /* Let the user know whether this device is unconfigured */ 1796 if (device->flags & CAM_DEV_UNCONFIGURED) 1797 cdm->matches[j].result.device_result.flags = 1798 DEV_RESULT_UNCONFIGURED; 1799 else 1800 cdm->matches[j].result.device_result.flags = 1801 DEV_RESULT_NOFLAG; 1802 } 1803 1804 /* 1805 * If the user isn't interested in peripherals, don't descend 1806 * the tree any further. 1807 */ 1808 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1809 return(1); 1810 1811 /* 1812 * If there is a peripheral list generation recorded, make sure 1813 * it hasn't changed. 1814 */ 1815 xpt_lock_buses(); 1816 mtx_lock(&bus->eb_mtx); 1817 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1818 && (cdm->pos.cookie.bus == bus) 1819 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1820 && (cdm->pos.cookie.target == device->target) 1821 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1822 && (cdm->pos.cookie.device == device) 1823 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1824 && (cdm->pos.cookie.periph != NULL)) { 1825 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1826 device->generation) { 1827 mtx_unlock(&bus->eb_mtx); 1828 xpt_unlock_buses(); 1829 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1830 return(0); 1831 } 1832 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1833 periph->refcount++; 1834 } else 1835 periph = NULL; 1836 mtx_unlock(&bus->eb_mtx); 1837 xpt_unlock_buses(); 1838 1839 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1840 } 1841 1842 static int 1843 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1844 { 1845 struct ccb_dev_match *cdm; 1846 dev_match_ret retval; 1847 1848 cdm = (struct ccb_dev_match *)arg; 1849 1850 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1851 1852 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1853 cdm->status = CAM_DEV_MATCH_ERROR; 1854 return(0); 1855 } 1856 1857 /* 1858 * If the copy flag is set, copy this peripheral out. 
1859 */ 1860 if (retval & DM_RET_COPY) { 1861 int spaceleft, j; 1862 size_t l; 1863 1864 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1865 sizeof(struct dev_match_result)); 1866 1867 /* 1868 * If we don't have enough space to put in another 1869 * match result, save our position and tell the 1870 * user there are more devices to check. 1871 */ 1872 if (spaceleft < sizeof(struct dev_match_result)) { 1873 bzero(&cdm->pos, sizeof(cdm->pos)); 1874 cdm->pos.position_type = 1875 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1876 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1877 CAM_DEV_POS_PERIPH; 1878 1879 cdm->pos.cookie.bus = periph->path->bus; 1880 cdm->pos.generations[CAM_BUS_GENERATION]= 1881 xsoftc.bus_generation; 1882 cdm->pos.cookie.target = periph->path->target; 1883 cdm->pos.generations[CAM_TARGET_GENERATION] = 1884 periph->path->bus->generation; 1885 cdm->pos.cookie.device = periph->path->device; 1886 cdm->pos.generations[CAM_DEV_GENERATION] = 1887 periph->path->target->generation; 1888 cdm->pos.cookie.periph = periph; 1889 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1890 periph->path->device->generation; 1891 cdm->status = CAM_DEV_MATCH_MORE; 1892 return(0); 1893 } 1894 1895 j = cdm->num_matches; 1896 cdm->num_matches++; 1897 cdm->matches[j].type = DEV_MATCH_PERIPH; 1898 cdm->matches[j].result.periph_result.path_id = 1899 periph->path->bus->path_id; 1900 cdm->matches[j].result.periph_result.target_id = 1901 periph->path->target->target_id; 1902 cdm->matches[j].result.periph_result.target_lun = 1903 periph->path->device->lun_id; 1904 cdm->matches[j].result.periph_result.unit_number = 1905 periph->unit_number; 1906 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 1907 strlcpy(cdm->matches[j].result.periph_result.periph_name, 1908 periph->periph_name, l); 1909 } 1910 1911 return(1); 1912 } 1913 1914 static int 1915 xptedtmatch(struct ccb_dev_match *cdm) 1916 { 1917 struct cam_eb *bus; 1918 int ret; 1919 1920 cdm->num_matches = 0; 1921 1922 /* 1923 * Check the bus list generation. If it has changed, the user 1924 * needs to reset everything and start over. 1925 */ 1926 xpt_lock_buses(); 1927 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1928 && (cdm->pos.cookie.bus != NULL)) { 1929 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1930 xsoftc.bus_generation) { 1931 xpt_unlock_buses(); 1932 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1933 return(0); 1934 } 1935 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1936 bus->refcount++; 1937 } else 1938 bus = NULL; 1939 xpt_unlock_buses(); 1940 1941 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1942 1943 /* 1944 * If we get back 0, that means that we had to stop before fully 1945 * traversing the EDT. It also means that one of the subroutines 1946 * has set the status field to the proper value. If we get back 1, 1947 * we've fully traversed the EDT and copied out any matching entries. 
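 *
 * A userland consumer therefore loops on XPT_DEV_MATCH until it stops
 * seeing CAM_DEV_MATCH_MORE, roughly (sketch only, error handling and
 * buffer setup omitted):
 *
 *	do {
 *		error = ioctl(xpt_fd, CAMIOCOMMAND, &ccb);
 *	} while (error == 0 && ccb.cdm.status == CAM_DEV_MATCH_MORE);
 *
 * keeping cdm.pos intact between passes so the kernel resumes where it
 * left off; CAM_DEV_MATCH_LAST (set just below) ends the loop.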
1948 */ 1949 if (ret == 1) 1950 cdm->status = CAM_DEV_MATCH_LAST; 1951 1952 return(ret); 1953 } 1954 1955 static int 1956 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1957 { 1958 struct cam_periph *periph; 1959 struct ccb_dev_match *cdm; 1960 1961 cdm = (struct ccb_dev_match *)arg; 1962 1963 xpt_lock_buses(); 1964 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1965 && (cdm->pos.cookie.pdrv == pdrv) 1966 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1967 && (cdm->pos.cookie.periph != NULL)) { 1968 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1969 (*pdrv)->generation) { 1970 xpt_unlock_buses(); 1971 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1972 return(0); 1973 } 1974 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1975 periph->refcount++; 1976 } else 1977 periph = NULL; 1978 xpt_unlock_buses(); 1979 1980 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1981 } 1982 1983 static int 1984 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1985 { 1986 struct ccb_dev_match *cdm; 1987 dev_match_ret retval; 1988 1989 cdm = (struct ccb_dev_match *)arg; 1990 1991 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1992 1993 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1994 cdm->status = CAM_DEV_MATCH_ERROR; 1995 return(0); 1996 } 1997 1998 /* 1999 * If the copy flag is set, copy this peripheral out. 2000 */ 2001 if (retval & DM_RET_COPY) { 2002 int spaceleft, j; 2003 size_t l; 2004 2005 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2006 sizeof(struct dev_match_result)); 2007 2008 /* 2009 * If we don't have enough space to put in another 2010 * match result, save our position and tell the 2011 * user there are more devices to check. 2012 */ 2013 if (spaceleft < sizeof(struct dev_match_result)) { 2014 struct periph_driver **pdrv; 2015 2016 pdrv = NULL; 2017 bzero(&cdm->pos, sizeof(cdm->pos)); 2018 cdm->pos.position_type = 2019 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2020 CAM_DEV_POS_PERIPH; 2021 2022 /* 2023 * This may look a bit non-sensical, but it is 2024 * actually quite logical. There are very few 2025 * peripheral drivers, and bloating every peripheral 2026 * structure with a pointer back to its parent 2027 * peripheral driver linker set entry would cost 2028 * more in the long run than doing this quick lookup. 2029 */ 2030 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2031 if (strcmp((*pdrv)->driver_name, 2032 periph->periph_name) == 0) 2033 break; 2034 } 2035 2036 if (*pdrv == NULL) { 2037 cdm->status = CAM_DEV_MATCH_ERROR; 2038 return(0); 2039 } 2040 2041 cdm->pos.cookie.pdrv = pdrv; 2042 /* 2043 * The periph generation slot does double duty, as 2044 * does the periph pointer slot. They are used for 2045 * both edt and pdrv lookups and positioning. 2046 */ 2047 cdm->pos.cookie.periph = periph; 2048 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2049 (*pdrv)->generation; 2050 cdm->status = CAM_DEV_MATCH_MORE; 2051 return(0); 2052 } 2053 2054 j = cdm->num_matches; 2055 cdm->num_matches++; 2056 cdm->matches[j].type = DEV_MATCH_PERIPH; 2057 cdm->matches[j].result.periph_result.path_id = 2058 periph->path->bus->path_id; 2059 2060 /* 2061 * The transport layer peripheral doesn't have a target or 2062 * lun. 
2063 */ 2064 if (periph->path->target) 2065 cdm->matches[j].result.periph_result.target_id = 2066 periph->path->target->target_id; 2067 else 2068 cdm->matches[j].result.periph_result.target_id = 2069 CAM_TARGET_WILDCARD; 2070 2071 if (periph->path->device) 2072 cdm->matches[j].result.periph_result.target_lun = 2073 periph->path->device->lun_id; 2074 else 2075 cdm->matches[j].result.periph_result.target_lun = 2076 CAM_LUN_WILDCARD; 2077 2078 cdm->matches[j].result.periph_result.unit_number = 2079 periph->unit_number; 2080 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 2081 strlcpy(cdm->matches[j].result.periph_result.periph_name, 2082 periph->periph_name, l); 2083 } 2084 2085 return(1); 2086 } 2087 2088 static int 2089 xptperiphlistmatch(struct ccb_dev_match *cdm) 2090 { 2091 int ret; 2092 2093 cdm->num_matches = 0; 2094 2095 /* 2096 * At the corresponding point in the EDT traversal function, we check 2097 * the bus list generation to make sure that no buses have been added 2098 * or removed since the user last sent an XPT_DEV_MATCH ccb through. 2099 * For the peripheral driver list traversal function, however, we 2100 * don't have to worry about new peripheral driver types coming or 2101 * going; they're in a linker set, and therefore can't change 2102 * without a recompile. 2103 */ 2104 2105 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2106 && (cdm->pos.cookie.pdrv != NULL)) 2107 ret = xptpdrvtraverse( 2108 (struct periph_driver **)cdm->pos.cookie.pdrv, 2109 xptplistpdrvfunc, cdm); 2110 else 2111 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2112 2113 /* 2114 * If we get back 0, that means that we had to stop before fully 2115 * traversing the peripheral driver tree. It also means that one of 2116 * the subroutines has set the status field to the proper value. If 2117 * we get back 1, we've fully traversed the peripheral driver tree 2118 * and copied out any matching entries.
2119 */ 2120 if (ret == 1) 2121 cdm->status = CAM_DEV_MATCH_LAST; 2122 2123 return(ret); 2124 } 2125 2126 static int 2127 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2128 { 2129 struct cam_eb *bus, *next_bus; 2130 int retval; 2131 2132 retval = 1; 2133 if (start_bus) 2134 bus = start_bus; 2135 else { 2136 xpt_lock_buses(); 2137 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2138 if (bus == NULL) { 2139 xpt_unlock_buses(); 2140 return (retval); 2141 } 2142 bus->refcount++; 2143 xpt_unlock_buses(); 2144 } 2145 for (; bus != NULL; bus = next_bus) { 2146 retval = tr_func(bus, arg); 2147 if (retval == 0) { 2148 xpt_release_bus(bus); 2149 break; 2150 } 2151 xpt_lock_buses(); 2152 next_bus = TAILQ_NEXT(bus, links); 2153 if (next_bus) 2154 next_bus->refcount++; 2155 xpt_unlock_buses(); 2156 xpt_release_bus(bus); 2157 } 2158 return(retval); 2159 } 2160 2161 static int 2162 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2163 xpt_targetfunc_t *tr_func, void *arg) 2164 { 2165 struct cam_et *target, *next_target; 2166 int retval; 2167 2168 retval = 1; 2169 if (start_target) 2170 target = start_target; 2171 else { 2172 mtx_lock(&bus->eb_mtx); 2173 target = TAILQ_FIRST(&bus->et_entries); 2174 if (target == NULL) { 2175 mtx_unlock(&bus->eb_mtx); 2176 return (retval); 2177 } 2178 target->refcount++; 2179 mtx_unlock(&bus->eb_mtx); 2180 } 2181 for (; target != NULL; target = next_target) { 2182 retval = tr_func(target, arg); 2183 if (retval == 0) { 2184 xpt_release_target(target); 2185 break; 2186 } 2187 mtx_lock(&bus->eb_mtx); 2188 next_target = TAILQ_NEXT(target, links); 2189 if (next_target) 2190 next_target->refcount++; 2191 mtx_unlock(&bus->eb_mtx); 2192 xpt_release_target(target); 2193 } 2194 return(retval); 2195 } 2196 2197 static int 2198 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2199 xpt_devicefunc_t *tr_func, void *arg) 2200 { 2201 struct cam_eb *bus; 2202 struct cam_ed *device, *next_device; 2203 int retval; 2204 2205 retval = 1; 2206 bus = target->bus; 2207 if (start_device) 2208 device = start_device; 2209 else { 2210 mtx_lock(&bus->eb_mtx); 2211 device = TAILQ_FIRST(&target->ed_entries); 2212 if (device == NULL) { 2213 mtx_unlock(&bus->eb_mtx); 2214 return (retval); 2215 } 2216 device->refcount++; 2217 mtx_unlock(&bus->eb_mtx); 2218 } 2219 for (; device != NULL; device = next_device) { 2220 mtx_lock(&device->device_mtx); 2221 retval = tr_func(device, arg); 2222 mtx_unlock(&device->device_mtx); 2223 if (retval == 0) { 2224 xpt_release_device(device); 2225 break; 2226 } 2227 mtx_lock(&bus->eb_mtx); 2228 next_device = TAILQ_NEXT(device, links); 2229 if (next_device) 2230 next_device->refcount++; 2231 mtx_unlock(&bus->eb_mtx); 2232 xpt_release_device(device); 2233 } 2234 return(retval); 2235 } 2236 2237 static int 2238 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2239 xpt_periphfunc_t *tr_func, void *arg) 2240 { 2241 struct cam_eb *bus; 2242 struct cam_periph *periph, *next_periph; 2243 int retval; 2244 2245 retval = 1; 2246 2247 bus = device->target->bus; 2248 if (start_periph) 2249 periph = start_periph; 2250 else { 2251 xpt_lock_buses(); 2252 mtx_lock(&bus->eb_mtx); 2253 periph = SLIST_FIRST(&device->periphs); 2254 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2255 periph = SLIST_NEXT(periph, periph_links); 2256 if (periph == NULL) { 2257 mtx_unlock(&bus->eb_mtx); 2258 xpt_unlock_buses(); 2259 return (retval); 2260 } 2261 periph->refcount++; 2262 mtx_unlock(&bus->eb_mtx); 2263 
xpt_unlock_buses(); 2264 } 2265 for (; periph != NULL; periph = next_periph) { 2266 retval = tr_func(periph, arg); 2267 if (retval == 0) { 2268 cam_periph_release_locked(periph); 2269 break; 2270 } 2271 xpt_lock_buses(); 2272 mtx_lock(&bus->eb_mtx); 2273 next_periph = SLIST_NEXT(periph, periph_links); 2274 while (next_periph != NULL && 2275 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2276 next_periph = SLIST_NEXT(next_periph, periph_links); 2277 if (next_periph) 2278 next_periph->refcount++; 2279 mtx_unlock(&bus->eb_mtx); 2280 xpt_unlock_buses(); 2281 cam_periph_release_locked(periph); 2282 } 2283 return(retval); 2284 } 2285 2286 static int 2287 xptpdrvtraverse(struct periph_driver **start_pdrv, 2288 xpt_pdrvfunc_t *tr_func, void *arg) 2289 { 2290 struct periph_driver **pdrv; 2291 int retval; 2292 2293 retval = 1; 2294 2295 /* 2296 * We don't traverse the peripheral driver list like we do the 2297 * other lists, because it is a linker set, and therefore cannot be 2298 * changed during runtime. If the peripheral driver list is ever 2299 * re-done to be something other than a linker set (i.e. it can 2300 * change while the system is running), the list traversal should 2301 * be modified to work like the other traversal functions. 2302 */ 2303 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2304 *pdrv != NULL; pdrv++) { 2305 retval = tr_func(pdrv, arg); 2306 2307 if (retval == 0) 2308 return(retval); 2309 } 2310 2311 return(retval); 2312 } 2313 2314 static int 2315 xptpdperiphtraverse(struct periph_driver **pdrv, 2316 struct cam_periph *start_periph, 2317 xpt_periphfunc_t *tr_func, void *arg) 2318 { 2319 struct cam_periph *periph, *next_periph; 2320 int retval; 2321 2322 retval = 1; 2323 2324 if (start_periph) 2325 periph = start_periph; 2326 else { 2327 xpt_lock_buses(); 2328 periph = TAILQ_FIRST(&(*pdrv)->units); 2329 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2330 periph = TAILQ_NEXT(periph, unit_links); 2331 if (periph == NULL) { 2332 xpt_unlock_buses(); 2333 return (retval); 2334 } 2335 periph->refcount++; 2336 xpt_unlock_buses(); 2337 } 2338 for (; periph != NULL; periph = next_periph) { 2339 cam_periph_lock(periph); 2340 retval = tr_func(periph, arg); 2341 cam_periph_unlock(periph); 2342 if (retval == 0) { 2343 cam_periph_release(periph); 2344 break; 2345 } 2346 xpt_lock_buses(); 2347 next_periph = TAILQ_NEXT(periph, unit_links); 2348 while (next_periph != NULL && 2349 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2350 next_periph = TAILQ_NEXT(next_periph, unit_links); 2351 if (next_periph) 2352 next_periph->refcount++; 2353 xpt_unlock_buses(); 2354 cam_periph_release(periph); 2355 } 2356 return(retval); 2357 } 2358 2359 static int 2360 xptdefbusfunc(struct cam_eb *bus, void *arg) 2361 { 2362 struct xpt_traverse_config *tr_config; 2363 2364 tr_config = (struct xpt_traverse_config *)arg; 2365 2366 if (tr_config->depth == XPT_DEPTH_BUS) { 2367 xpt_busfunc_t *tr_func; 2368 2369 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2370 2371 return(tr_func(bus, tr_config->tr_arg)); 2372 } else 2373 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2374 } 2375 2376 static int 2377 xptdeftargetfunc(struct cam_et *target, void *arg) 2378 { 2379 struct xpt_traverse_config *tr_config; 2380 2381 tr_config = (struct xpt_traverse_config *)arg; 2382 2383 if (tr_config->depth == XPT_DEPTH_TARGET) { 2384 xpt_targetfunc_t *tr_func; 2385 2386 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2387 2388 return(tr_func(target, tr_config->tr_arg)); 2389 } else 2390 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2391 } 2392 2393 static int 2394 xptdefdevicefunc(struct cam_ed *device, void *arg) 2395 { 2396 struct xpt_traverse_config *tr_config; 2397 2398 tr_config = (struct xpt_traverse_config *)arg; 2399 2400 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2401 xpt_devicefunc_t *tr_func; 2402 2403 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2404 2405 return(tr_func(device, tr_config->tr_arg)); 2406 } else 2407 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2408 } 2409 2410 static int 2411 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2412 { 2413 struct xpt_traverse_config *tr_config; 2414 xpt_periphfunc_t *tr_func; 2415 2416 tr_config = (struct xpt_traverse_config *)arg; 2417 2418 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2419 2420 /* 2421 * Unlike the other default functions, we don't check for depth 2422 * here. The peripheral driver level is the last level in the EDT, 2423 * so if we're here, we should execute the function in question. 2424 */ 2425 return(tr_func(periph, tr_config->tr_arg)); 2426 } 2427 2428 /* 2429 * Execute the given function for every bus in the EDT. 2430 */ 2431 static int 2432 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2433 { 2434 struct xpt_traverse_config tr_config; 2435 2436 tr_config.depth = XPT_DEPTH_BUS; 2437 tr_config.tr_func = tr_func; 2438 tr_config.tr_arg = arg; 2439 2440 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2441 } 2442 2443 /* 2444 * Execute the given function for every device in the EDT. 2445 */ 2446 static int 2447 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2448 { 2449 struct xpt_traverse_config tr_config; 2450 2451 tr_config.depth = XPT_DEPTH_DEVICE; 2452 tr_config.tr_func = tr_func; 2453 tr_config.tr_arg = arg; 2454 2455 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2456 } 2457 2458 static int 2459 xptsetasyncfunc(struct cam_ed *device, void *arg) 2460 { 2461 struct cam_path path; 2462 struct ccb_getdev cgd; 2463 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2464 2465 /* 2466 * Don't report unconfigured devices (Wildcard devs, 2467 * devices only for target mode, device instances 2468 * that have been invalidated but are waiting for 2469 * their last reference count to be released). 
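 *
 * Every device that survives this filter is reported to the caller's
 * callback as an AC_FOUND_DEVICE event below, so a client registering
 * for that event is also told about devices that already existed before
 * it registered.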
2470 */ 2471 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2472 return (1); 2473 2474 memset(&cgd, 0, sizeof(cgd)); 2475 xpt_compile_path(&path, 2476 NULL, 2477 device->target->bus->path_id, 2478 device->target->target_id, 2479 device->lun_id); 2480 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2481 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2482 xpt_action((union ccb *)&cgd); 2483 csa->callback(csa->callback_arg, 2484 AC_FOUND_DEVICE, 2485 &path, &cgd); 2486 xpt_release_path(&path); 2487 2488 return(1); 2489 } 2490 2491 static int 2492 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2493 { 2494 struct cam_path path; 2495 struct ccb_pathinq cpi; 2496 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2497 2498 xpt_compile_path(&path, /*periph*/NULL, 2499 bus->path_id, 2500 CAM_TARGET_WILDCARD, 2501 CAM_LUN_WILDCARD); 2502 xpt_path_lock(&path); 2503 xpt_path_inq(&cpi, &path); 2504 csa->callback(csa->callback_arg, 2505 AC_PATH_REGISTERED, 2506 &path, &cpi); 2507 xpt_path_unlock(&path); 2508 xpt_release_path(&path); 2509 2510 return(1); 2511 } 2512 2513 void 2514 xpt_action(union ccb *start_ccb) 2515 { 2516 2517 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2518 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2519 xpt_action_name(start_ccb->ccb_h.func_code))); 2520 2521 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2522 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2523 } 2524 2525 void 2526 xpt_action_default(union ccb *start_ccb) 2527 { 2528 struct cam_path *path; 2529 struct cam_sim *sim; 2530 struct mtx *mtx; 2531 2532 path = start_ccb->ccb_h.path; 2533 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2534 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2535 xpt_action_name(start_ccb->ccb_h.func_code))); 2536 2537 switch (start_ccb->ccb_h.func_code) { 2538 case XPT_SCSI_IO: 2539 { 2540 struct cam_ed *device; 2541 2542 /* 2543 * For the sake of compatibility with SCSI-1 2544 * devices that may not understand the identify 2545 * message, we include lun information in the 2546 * second byte of all commands. SCSI-1 specifies 2547 * that luns are a 3 bit value and reserves only 3 2548 * bits for lun information in the CDB. Later 2549 * revisions of the SCSI spec allow for more than 8 2550 * luns, but have deprecated lun information in the 2551 * CDB. So, if the lun won't fit, we must omit. 2552 * 2553 * Also be aware that during initial probing for devices, 2554 * the inquiry information is unknown but initialized to 0. 2555 * This means that this code will be exercised while probing 2556 * devices with an ANSI revision greater than 2. 
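 *
 * As a concrete example: with protocol_version <= SCSI_REV_2 and
 * target_lun == 2, the second CDB byte is OR'd with (2 << 5) == 0x40.
 * LUNs of 8 and above cannot be encoded in those three bits at all,
 * which is why the test below requires target_lun < 8.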
2557 */ 2558 device = path->device; 2559 if (device->protocol_version <= SCSI_REV_2 2560 && start_ccb->ccb_h.target_lun < 8 2561 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2562 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2563 start_ccb->ccb_h.target_lun << 5; 2564 } 2565 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2566 } 2567 /* FALLTHROUGH */ 2568 case XPT_TARGET_IO: 2569 case XPT_CONT_TARGET_IO: 2570 start_ccb->csio.sense_resid = 0; 2571 start_ccb->csio.resid = 0; 2572 /* FALLTHROUGH */ 2573 case XPT_ATA_IO: 2574 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2575 start_ccb->ataio.resid = 0; 2576 /* FALLTHROUGH */ 2577 case XPT_NVME_IO: 2578 case XPT_NVME_ADMIN: 2579 case XPT_MMC_IO: 2580 case XPT_MMC_GET_TRAN_SETTINGS: 2581 case XPT_MMC_SET_TRAN_SETTINGS: 2582 case XPT_RESET_DEV: 2583 case XPT_ENG_EXEC: 2584 case XPT_SMP_IO: 2585 { 2586 struct cam_devq *devq; 2587 2588 devq = path->bus->sim->devq; 2589 mtx_lock(&devq->send_mtx); 2590 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2591 if (xpt_schedule_devq(devq, path->device) != 0) 2592 xpt_run_devq(devq); 2593 mtx_unlock(&devq->send_mtx); 2594 break; 2595 } 2596 case XPT_CALC_GEOMETRY: 2597 /* Filter out garbage */ 2598 if (start_ccb->ccg.block_size == 0 2599 || start_ccb->ccg.volume_size == 0) { 2600 start_ccb->ccg.cylinders = 0; 2601 start_ccb->ccg.heads = 0; 2602 start_ccb->ccg.secs_per_track = 0; 2603 start_ccb->ccb_h.status = CAM_REQ_CMP; 2604 break; 2605 } 2606 goto call_sim; 2607 case XPT_ABORT: 2608 { 2609 union ccb* abort_ccb; 2610 2611 abort_ccb = start_ccb->cab.abort_ccb; 2612 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2613 struct cam_ed *device; 2614 struct cam_devq *devq; 2615 2616 device = abort_ccb->ccb_h.path->device; 2617 devq = device->sim->devq; 2618 2619 mtx_lock(&devq->send_mtx); 2620 if (abort_ccb->ccb_h.pinfo.index > 0) { 2621 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2622 abort_ccb->ccb_h.status = 2623 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2624 xpt_freeze_devq_device(device, 1); 2625 mtx_unlock(&devq->send_mtx); 2626 xpt_done(abort_ccb); 2627 start_ccb->ccb_h.status = CAM_REQ_CMP; 2628 break; 2629 } 2630 mtx_unlock(&devq->send_mtx); 2631 2632 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2633 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2634 /* 2635 * We've caught this ccb en route to 2636 * the SIM. Flag it for abort and the 2637 * SIM will do so just before starting 2638 * real work on the CCB. 2639 */ 2640 abort_ccb->ccb_h.status = 2641 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2642 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2643 start_ccb->ccb_h.status = CAM_REQ_CMP; 2644 break; 2645 } 2646 } 2647 if (XPT_FC_IS_QUEUED(abort_ccb) 2648 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2649 /* 2650 * It's already completed but waiting 2651 * for our SWI to get to it. 2652 */ 2653 start_ccb->ccb_h.status = CAM_UA_ABORT; 2654 break; 2655 } 2656 /* 2657 * If we weren't able to take care of the abort request 2658 * in the XPT, pass the request down to the SIM for processing. 
2659 */ 2660 } 2661 /* FALLTHROUGH */ 2662 case XPT_ACCEPT_TARGET_IO: 2663 case XPT_EN_LUN: 2664 case XPT_IMMED_NOTIFY: 2665 case XPT_NOTIFY_ACK: 2666 case XPT_RESET_BUS: 2667 case XPT_IMMEDIATE_NOTIFY: 2668 case XPT_NOTIFY_ACKNOWLEDGE: 2669 case XPT_GET_SIM_KNOB_OLD: 2670 case XPT_GET_SIM_KNOB: 2671 case XPT_SET_SIM_KNOB: 2672 case XPT_GET_TRAN_SETTINGS: 2673 case XPT_SET_TRAN_SETTINGS: 2674 case XPT_PATH_INQ: 2675 call_sim: 2676 sim = path->bus->sim; 2677 mtx = sim->mtx; 2678 if (mtx && !mtx_owned(mtx)) 2679 mtx_lock(mtx); 2680 else 2681 mtx = NULL; 2682 2683 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2684 ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); 2685 (*(sim->sim_action))(sim, start_ccb); 2686 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2687 ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); 2688 if (mtx) 2689 mtx_unlock(mtx); 2690 break; 2691 case XPT_PATH_STATS: 2692 start_ccb->cpis.last_reset = path->bus->last_reset; 2693 start_ccb->ccb_h.status = CAM_REQ_CMP; 2694 break; 2695 case XPT_GDEV_TYPE: 2696 { 2697 struct cam_ed *dev; 2698 2699 dev = path->device; 2700 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2701 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2702 } else { 2703 struct ccb_getdev *cgd; 2704 2705 cgd = &start_ccb->cgd; 2706 cgd->protocol = dev->protocol; 2707 cgd->inq_data = dev->inq_data; 2708 cgd->ident_data = dev->ident_data; 2709 cgd->inq_flags = dev->inq_flags; 2710 cgd->ccb_h.status = CAM_REQ_CMP; 2711 cgd->serial_num_len = dev->serial_num_len; 2712 if ((dev->serial_num_len > 0) 2713 && (dev->serial_num != NULL)) 2714 bcopy(dev->serial_num, cgd->serial_num, 2715 dev->serial_num_len); 2716 } 2717 break; 2718 } 2719 case XPT_GDEV_STATS: 2720 { 2721 struct ccb_getdevstats *cgds = &start_ccb->cgds; 2722 struct cam_ed *dev = path->device; 2723 struct cam_eb *bus = path->bus; 2724 struct cam_et *tar = path->target; 2725 struct cam_devq *devq = bus->sim->devq; 2726 2727 mtx_lock(&devq->send_mtx); 2728 cgds->dev_openings = dev->ccbq.dev_openings; 2729 cgds->dev_active = dev->ccbq.dev_active; 2730 cgds->allocated = dev->ccbq.allocated; 2731 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2732 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; 2733 cgds->last_reset = tar->last_reset; 2734 cgds->maxtags = dev->maxtags; 2735 cgds->mintags = dev->mintags; 2736 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2737 cgds->last_reset = bus->last_reset; 2738 mtx_unlock(&devq->send_mtx); 2739 cgds->ccb_h.status = CAM_REQ_CMP; 2740 break; 2741 } 2742 case XPT_GDEVLIST: 2743 { 2744 struct cam_periph *nperiph; 2745 struct periph_list *periph_head; 2746 struct ccb_getdevlist *cgdl; 2747 u_int i; 2748 struct cam_ed *device; 2749 bool found; 2750 2751 found = false; 2752 2753 /* 2754 * Don't want anyone mucking with our data. 2755 */ 2756 device = path->device; 2757 periph_head = &device->periphs; 2758 cgdl = &start_ccb->cgdl; 2759 2760 /* 2761 * Check and see if the list has changed since the user 2762 * last requested a list member. If so, tell them that the 2763 * list has changed, and therefore they need to start over 2764 * from the beginning. 2765 */ 2766 if ((cgdl->index != 0) && 2767 (cgdl->generation != device->generation)) { 2768 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2769 break; 2770 } 2771 2772 /* 2773 * Traverse the list of peripherals and attempt to find 2774 * the requested peripheral. 
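 *
 * Note that cgdl->index and cgdl->generation are advanced below, so a
 * caller can enumerate every peripheral attached to the device by
 * reissuing XPT_GDEVLIST until the status comes back as
 * CAM_GDEVLIST_LAST_DEVICE (or CAM_GDEVLIST_LIST_CHANGED, in which case
 * it has to start over from index 0).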
2775 */ 2776 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2777 (nperiph != NULL) && (i <= cgdl->index); 2778 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2779 if (i == cgdl->index) { 2780 strlcpy(cgdl->periph_name, 2781 nperiph->periph_name, 2782 sizeof(cgdl->periph_name)); 2783 cgdl->unit_number = nperiph->unit_number; 2784 found = true; 2785 } 2786 } 2787 if (!found) { 2788 cgdl->status = CAM_GDEVLIST_ERROR; 2789 break; 2790 } 2791 2792 if (nperiph == NULL) 2793 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2794 else 2795 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2796 2797 cgdl->index++; 2798 cgdl->generation = device->generation; 2799 2800 cgdl->ccb_h.status = CAM_REQ_CMP; 2801 break; 2802 } 2803 case XPT_DEV_MATCH: 2804 { 2805 dev_pos_type position_type; 2806 struct ccb_dev_match *cdm; 2807 2808 cdm = &start_ccb->cdm; 2809 2810 /* 2811 * There are two ways of getting at information in the EDT. 2812 * The first way is via the primary EDT tree. It starts 2813 * with a list of buses, then a list of targets on a bus, 2814 * then devices/luns on a target, and then peripherals on a 2815 * device/lun. The "other" way is by the peripheral driver 2816 * lists. The peripheral driver lists are organized by 2817 * peripheral driver. (obviously) So it makes sense to 2818 * use the peripheral driver list if the user is looking 2819 * for something like "da1", or all "da" devices. If the 2820 * user is looking for something on a particular bus/target 2821 * or lun, it's generally better to go through the EDT tree. 2822 */ 2823 2824 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2825 position_type = cdm->pos.position_type; 2826 else { 2827 u_int i; 2828 2829 position_type = CAM_DEV_POS_NONE; 2830 2831 for (i = 0; i < cdm->num_patterns; i++) { 2832 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2833 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2834 position_type = CAM_DEV_POS_EDT; 2835 break; 2836 } 2837 } 2838 2839 if (cdm->num_patterns == 0) 2840 position_type = CAM_DEV_POS_EDT; 2841 else if (position_type == CAM_DEV_POS_NONE) 2842 position_type = CAM_DEV_POS_PDRV; 2843 } 2844 2845 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2846 case CAM_DEV_POS_EDT: 2847 xptedtmatch(cdm); 2848 break; 2849 case CAM_DEV_POS_PDRV: 2850 xptperiphlistmatch(cdm); 2851 break; 2852 default: 2853 cdm->status = CAM_DEV_MATCH_ERROR; 2854 break; 2855 } 2856 2857 if (cdm->status == CAM_DEV_MATCH_ERROR) 2858 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2859 else 2860 start_ccb->ccb_h.status = CAM_REQ_CMP; 2861 2862 break; 2863 } 2864 case XPT_SASYNC_CB: 2865 { 2866 struct ccb_setasync *csa; 2867 struct async_node *cur_entry; 2868 struct async_list *async_head; 2869 uint32_t added; 2870 2871 csa = &start_ccb->csa; 2872 added = csa->event_enable; 2873 async_head = &path->device->asyncs; 2874 2875 /* 2876 * If there is already an entry for us, simply 2877 * update it. 2878 */ 2879 cur_entry = SLIST_FIRST(async_head); 2880 while (cur_entry != NULL) { 2881 if ((cur_entry->callback_arg == csa->callback_arg) 2882 && (cur_entry->callback == csa->callback)) 2883 break; 2884 cur_entry = SLIST_NEXT(cur_entry, links); 2885 } 2886 2887 if (cur_entry != NULL) { 2888 /* 2889 * If the request has no flags set, 2890 * remove the entry. 
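 *
 * For reference, a minimal registration sketch (assuming the caller
 * already holds a valid path and may call xpt_action(); the handler and
 * softc names are hypothetical):
 *
 *	struct ccb_setasync csa;
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = my_async_callback;
 *	csa.callback_arg = my_softc;
 *	xpt_action((union ccb *)&csa);
 *
 * Re-issuing the CCB with event_enable == 0 for the same
 * callback/callback_arg pair removes the registration again, which is
 * exactly the case handled here.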
2891 */ 2892 added &= ~cur_entry->event_enable; 2893 if (csa->event_enable == 0) { 2894 SLIST_REMOVE(async_head, cur_entry, 2895 async_node, links); 2896 xpt_release_device(path->device); 2897 free(cur_entry, M_CAMXPT); 2898 } else { 2899 cur_entry->event_enable = csa->event_enable; 2900 } 2901 csa->event_enable = added; 2902 } else { 2903 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2904 M_NOWAIT); 2905 if (cur_entry == NULL) { 2906 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2907 break; 2908 } 2909 cur_entry->event_enable = csa->event_enable; 2910 cur_entry->event_lock = (path->bus->sim->mtx && 2911 mtx_owned(path->bus->sim->mtx)) ? 1 : 0; 2912 cur_entry->callback_arg = csa->callback_arg; 2913 cur_entry->callback = csa->callback; 2914 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2915 xpt_acquire_device(path->device); 2916 } 2917 start_ccb->ccb_h.status = CAM_REQ_CMP; 2918 break; 2919 } 2920 case XPT_REL_SIMQ: 2921 { 2922 struct ccb_relsim *crs; 2923 struct cam_ed *dev; 2924 2925 crs = &start_ccb->crs; 2926 dev = path->device; 2927 if (dev == NULL) { 2928 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2929 break; 2930 } 2931 2932 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2933 /* Don't ever go below one opening */ 2934 if (crs->openings > 0) { 2935 xpt_dev_ccbq_resize(path, crs->openings); 2936 if (bootverbose) { 2937 xpt_print(path, 2938 "number of openings is now %d\n", 2939 crs->openings); 2940 } 2941 } 2942 } 2943 2944 mtx_lock(&dev->sim->devq->send_mtx); 2945 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2946 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2947 /* 2948 * Just extend the old timeout and decrement 2949 * the freeze count so that a single timeout 2950 * is sufficient for releasing the queue. 2951 */ 2952 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2953 callout_stop(&dev->callout); 2954 } else { 2955 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2956 } 2957 2958 callout_reset_sbt(&dev->callout, 2959 SBT_1MS * crs->release_timeout, SBT_1MS, 2960 xpt_release_devq_timeout, dev, 0); 2961 2962 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2963 } 2964 2965 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2966 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2967 /* 2968 * Decrement the freeze count so that a single 2969 * completion is still sufficient to unfreeze 2970 * the queue. 2971 */ 2972 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2973 } else { 2974 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2975 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2976 } 2977 } 2978 2979 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2980 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2981 || (dev->ccbq.dev_active == 0)) { 2982 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2983 } else { 2984 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2985 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2986 } 2987 } 2988 mtx_unlock(&dev->sim->devq->send_mtx); 2989 2990 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2991 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2992 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2993 start_ccb->ccb_h.status = CAM_REQ_CMP; 2994 break; 2995 } 2996 case XPT_DEBUG: { 2997 struct cam_path *oldpath; 2998 2999 /* Check that all request bits are supported. 
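 *
 * Anything outside CAM_DEBUG_COMPILE (the set of debug flags actually
 * compiled into this kernel) is rejected with CAM_FUNC_NOTAVAIL rather
 * than being silently ignored.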
*/ 3000 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3001 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3002 break; 3003 } 3004 3005 cam_dflags = CAM_DEBUG_NONE; 3006 if (cam_dpath != NULL) { 3007 oldpath = cam_dpath; 3008 cam_dpath = NULL; 3009 xpt_free_path(oldpath); 3010 } 3011 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3012 if (xpt_create_path(&cam_dpath, NULL, 3013 start_ccb->ccb_h.path_id, 3014 start_ccb->ccb_h.target_id, 3015 start_ccb->ccb_h.target_lun) != 3016 CAM_REQ_CMP) { 3017 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3018 } else { 3019 cam_dflags = start_ccb->cdbg.flags; 3020 start_ccb->ccb_h.status = CAM_REQ_CMP; 3021 xpt_print(cam_dpath, "debugging flags now %x\n", 3022 cam_dflags); 3023 } 3024 } else 3025 start_ccb->ccb_h.status = CAM_REQ_CMP; 3026 break; 3027 } 3028 case XPT_NOOP: 3029 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3030 xpt_freeze_devq(path, 1); 3031 start_ccb->ccb_h.status = CAM_REQ_CMP; 3032 break; 3033 case XPT_REPROBE_LUN: 3034 xpt_async(AC_INQ_CHANGED, path, NULL); 3035 start_ccb->ccb_h.status = CAM_REQ_CMP; 3036 xpt_done(start_ccb); 3037 break; 3038 case XPT_ASYNC: 3039 /* 3040 * Queue the async operation so it can be run from a sleepable 3041 * context. 3042 */ 3043 start_ccb->ccb_h.status = CAM_REQ_CMP; 3044 mtx_lock(&cam_async.cam_doneq_mtx); 3045 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe); 3046 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX; 3047 mtx_unlock(&cam_async.cam_doneq_mtx); 3048 wakeup(&cam_async.cam_doneq); 3049 break; 3050 default: 3051 case XPT_SDEV_TYPE: 3052 case XPT_TERM_IO: 3053 case XPT_ENG_INQ: 3054 /* XXX Implement */ 3055 xpt_print(start_ccb->ccb_h.path, 3056 "%s: CCB type %#x %s not supported\n", __func__, 3057 start_ccb->ccb_h.func_code, 3058 xpt_action_name(start_ccb->ccb_h.func_code)); 3059 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3060 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3061 xpt_done(start_ccb); 3062 } 3063 break; 3064 } 3065 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3066 ("xpt_action_default: func= %#x %s status %#x\n", 3067 start_ccb->ccb_h.func_code, 3068 xpt_action_name(start_ccb->ccb_h.func_code), 3069 start_ccb->ccb_h.status)); 3070 } 3071 3072 /* 3073 * Call the sim poll routine to allow the sim to complete 3074 * any inflight requests, then call camisr_runqueue to 3075 * complete any CCB that the polling completed. 3076 */ 3077 void 3078 xpt_sim_poll(struct cam_sim *sim) 3079 { 3080 struct mtx *mtx; 3081 3082 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3083 mtx = sim->mtx; 3084 if (mtx) 3085 mtx_lock(mtx); 3086 (*(sim->sim_poll))(sim); 3087 if (mtx) 3088 mtx_unlock(mtx); 3089 camisr_runqueue(); 3090 } 3091 3092 uint32_t 3093 xpt_poll_setup(union ccb *start_ccb) 3094 { 3095 uint32_t timeout; 3096 struct cam_sim *sim; 3097 struct cam_devq *devq; 3098 struct cam_ed *dev; 3099 3100 timeout = start_ccb->ccb_h.timeout * 10; 3101 sim = start_ccb->ccb_h.path->bus->sim; 3102 devq = sim->devq; 3103 dev = start_ccb->ccb_h.path->device; 3104 3105 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3106 3107 /* 3108 * Steal an opening so that no other queued requests 3109 * can get it before us while we simulate interrupts. 
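 *
 * The CCB timeout is given in milliseconds, while the polling loops here
 * and in xpt_pollwait() spin in DELAY(100) (i.e. 100 microsecond) steps;
 * that is what the "timeout * 10" conversion above accounts for.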
3110 */ 3111 mtx_lock(&devq->send_mtx); 3112 dev->ccbq.dev_openings--; 3113 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3114 (--timeout > 0)) { 3115 mtx_unlock(&devq->send_mtx); 3116 DELAY(100); 3117 xpt_sim_poll(sim); 3118 mtx_lock(&devq->send_mtx); 3119 } 3120 dev->ccbq.dev_openings++; 3121 mtx_unlock(&devq->send_mtx); 3122 3123 return (timeout); 3124 } 3125 3126 void 3127 xpt_pollwait(union ccb *start_ccb, uint32_t timeout) 3128 { 3129 3130 KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim), 3131 ("%s: non-pollable sim", __func__)); 3132 while (--timeout > 0) { 3133 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); 3134 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3135 != CAM_REQ_INPROG) 3136 break; 3137 DELAY(100); 3138 } 3139 3140 if (timeout == 0) { 3141 /* 3142 * XXX Is it worth adding a sim_timeout entry 3143 * point so we can attempt recovery? If 3144 * this is only used for dumps, I don't think 3145 * it is. 3146 */ 3147 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3148 } 3149 } 3150 3151 /* 3152 * Schedule a peripheral driver to receive a ccb when its 3153 * target device has space for more transactions. 3154 */ 3155 void 3156 xpt_schedule(struct cam_periph *periph, uint32_t new_priority) 3157 { 3158 3159 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3160 cam_periph_assert(periph, MA_OWNED); 3161 if (new_priority < periph->scheduled_priority) { 3162 periph->scheduled_priority = new_priority; 3163 xpt_run_allocq(periph, 0); 3164 } 3165 } 3166 3167 /* 3168 * Schedule a device to run on a given queue. 3169 * If the device was inserted as a new entry on the queue, 3170 * return 1 meaning the device queue should be run. If we 3171 * were already queued, implying someone else has already 3172 * started the queue, return 0 so the caller doesn't attempt 3173 * to run the queue. 3174 */ 3175 static int 3176 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3177 uint32_t new_priority) 3178 { 3179 int retval; 3180 uint32_t old_priority; 3181 3182 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3183 3184 old_priority = pinfo->priority; 3185 3186 /* 3187 * Are we already queued? 
3188 */ 3189 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3190 /* Simply reorder based on new priority */ 3191 if (new_priority < old_priority) { 3192 camq_change_priority(queue, pinfo->index, 3193 new_priority); 3194 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3195 ("changed priority to %d\n", 3196 new_priority)); 3197 retval = 1; 3198 } else 3199 retval = 0; 3200 } else { 3201 /* New entry on the queue */ 3202 if (new_priority < old_priority) 3203 pinfo->priority = new_priority; 3204 3205 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3206 ("Inserting onto queue\n")); 3207 pinfo->generation = ++queue->generation; 3208 camq_insert(queue, pinfo); 3209 retval = 1; 3210 } 3211 return (retval); 3212 } 3213 3214 static void 3215 xpt_run_allocq_task(void *context, int pending) 3216 { 3217 struct cam_periph *periph = context; 3218 3219 cam_periph_lock(periph); 3220 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3221 xpt_run_allocq(periph, 1); 3222 cam_periph_unlock(periph); 3223 cam_periph_release(periph); 3224 } 3225 3226 static void 3227 xpt_run_allocq(struct cam_periph *periph, int sleep) 3228 { 3229 struct cam_ed *device; 3230 union ccb *ccb; 3231 uint32_t prio; 3232 3233 cam_periph_assert(periph, MA_OWNED); 3234 if (periph->periph_allocating) 3235 return; 3236 cam_periph_doacquire(periph); 3237 periph->periph_allocating = 1; 3238 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3239 device = periph->path->device; 3240 ccb = NULL; 3241 restart: 3242 while ((prio = min(periph->scheduled_priority, 3243 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3244 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3245 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3246 if (ccb == NULL && 3247 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3248 if (sleep) { 3249 ccb = xpt_get_ccb(periph); 3250 goto restart; 3251 } 3252 if (periph->flags & CAM_PERIPH_RUN_TASK) 3253 break; 3254 cam_periph_doacquire(periph); 3255 periph->flags |= CAM_PERIPH_RUN_TASK; 3256 taskqueue_enqueue(xsoftc.xpt_taskq, 3257 &periph->periph_run_task); 3258 break; 3259 } 3260 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3261 if (prio == periph->immediate_priority) { 3262 periph->immediate_priority = CAM_PRIORITY_NONE; 3263 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3264 ("waking cam_periph_getccb()\n")); 3265 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3266 periph_links.sle); 3267 wakeup(&periph->ccb_list); 3268 } else { 3269 periph->scheduled_priority = CAM_PRIORITY_NONE; 3270 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3271 ("calling periph_start()\n")); 3272 periph->periph_start(periph, ccb); 3273 } 3274 ccb = NULL; 3275 } 3276 if (ccb != NULL) 3277 xpt_release_ccb(ccb); 3278 periph->periph_allocating = 0; 3279 cam_periph_release_locked(periph); 3280 } 3281 3282 static void 3283 xpt_run_devq(struct cam_devq *devq) 3284 { 3285 struct mtx *mtx; 3286 3287 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3288 3289 devq->send_queue.qfrozen_cnt++; 3290 while ((devq->send_queue.entries > 0) 3291 && (devq->send_openings > 0) 3292 && (devq->send_queue.qfrozen_cnt <= 1)) { 3293 struct cam_ed *device; 3294 union ccb *work_ccb; 3295 struct cam_sim *sim; 3296 struct xpt_proto *proto; 3297 3298 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3299 CAMQ_HEAD); 3300 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3301 ("running device %p\n", device)); 3302 3303 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3304 if (work_ccb == NULL) { 3305 printf("device on run queue with no ccbs???\n"); 3306 continue; 3307 } 3308 3309 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 
0) { 3310 mtx_lock(&xsoftc.xpt_highpower_lock); 3311 if (xsoftc.num_highpower <= 0) { 3312 /* 3313 * We got a high power command, but we 3314 * don't have any available slots. Freeze 3315 * the device queue until we have a slot 3316 * available. 3317 */ 3318 xpt_freeze_devq_device(device, 1); 3319 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3320 highpowerq_entry); 3321 3322 mtx_unlock(&xsoftc.xpt_highpower_lock); 3323 continue; 3324 } else { 3325 /* 3326 * Consume a high power slot while 3327 * this ccb runs. 3328 */ 3329 xsoftc.num_highpower--; 3330 } 3331 mtx_unlock(&xsoftc.xpt_highpower_lock); 3332 } 3333 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3334 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3335 devq->send_openings--; 3336 devq->send_active++; 3337 xpt_schedule_devq(devq, device); 3338 mtx_unlock(&devq->send_mtx); 3339 3340 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3341 /* 3342 * The client wants to freeze the queue 3343 * after this CCB is sent. 3344 */ 3345 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3346 } 3347 3348 /* In Target mode, the peripheral driver knows best... */ 3349 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3350 if ((device->inq_flags & SID_CmdQue) != 0 3351 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3352 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3353 else 3354 /* 3355 * Clear this in case of a retried CCB that 3356 * failed due to a rejected tag. 3357 */ 3358 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3359 } 3360 3361 KASSERT(device == work_ccb->ccb_h.path->device, 3362 ("device (%p) / path->device (%p) mismatch", 3363 device, work_ccb->ccb_h.path->device)); 3364 proto = xpt_proto_find(device->protocol); 3365 if (proto && proto->ops->debug_out) 3366 proto->ops->debug_out(work_ccb); 3367 3368 /* 3369 * Device queues can be shared among multiple SIM instances 3370 * that reside on different buses. Use the SIM from the 3371 * queued device, rather than the one from the calling bus. 3372 */ 3373 sim = device->sim; 3374 mtx = sim->mtx; 3375 if (mtx && !mtx_owned(mtx)) 3376 mtx_lock(mtx); 3377 else 3378 mtx = NULL; 3379 work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); 3380 (*(sim->sim_action))(sim, work_ccb); 3381 if (mtx) 3382 mtx_unlock(mtx); 3383 mtx_lock(&devq->send_mtx); 3384 } 3385 devq->send_queue.qfrozen_cnt--; 3386 } 3387 3388 /* 3389 * This function merges stuff from the src ccb into the dst ccb, while keeping 3390 * important fields in the dst ccb constant. 3391 */ 3392 void 3393 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb) 3394 { 3395 3396 /* 3397 * Pull fields that are valid for peripheral drivers to set 3398 * into the dst CCB along with the CCB "payload". 
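 *
 * Only the four header fields copied below, plus the body that follows
 * the header, are taken from the source CCB; everything else in the
 * destination header (path, priority, queue bookkeeping and so on) is
 * deliberately left untouched.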
3399 */ 3400 dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count; 3401 dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code; 3402 dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout; 3403 dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags; 3404 bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1], 3405 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3406 } 3407 3408 void 3409 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3410 uint32_t priority, uint32_t flags) 3411 { 3412 3413 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3414 ccb_h->pinfo.priority = priority; 3415 ccb_h->path = path; 3416 ccb_h->path_id = path->bus->path_id; 3417 if (path->target) 3418 ccb_h->target_id = path->target->target_id; 3419 else 3420 ccb_h->target_id = CAM_TARGET_WILDCARD; 3421 if (path->device) { 3422 ccb_h->target_lun = path->device->lun_id; 3423 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3424 } else { 3425 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3426 } 3427 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3428 ccb_h->flags = flags; 3429 ccb_h->xflags = 0; 3430 } 3431 3432 void 3433 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority) 3434 { 3435 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3436 } 3437 3438 /* Path manipulation functions */ 3439 cam_status 3440 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3441 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3442 { 3443 struct cam_path *path; 3444 cam_status status; 3445 3446 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3447 3448 if (path == NULL) { 3449 status = CAM_RESRC_UNAVAIL; 3450 return(status); 3451 } 3452 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3453 if (status != CAM_REQ_CMP) { 3454 free(path, M_CAMPATH); 3455 path = NULL; 3456 } 3457 *new_path_ptr = path; 3458 return (status); 3459 } 3460 3461 cam_status 3462 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3463 struct cam_periph *periph, path_id_t path_id, 3464 target_id_t target_id, lun_id_t lun_id) 3465 { 3466 3467 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3468 lun_id)); 3469 } 3470 3471 cam_status 3472 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3473 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3474 { 3475 struct cam_eb *bus; 3476 struct cam_et *target; 3477 struct cam_ed *device; 3478 cam_status status; 3479 3480 status = CAM_REQ_CMP; /* Completed without error */ 3481 target = NULL; /* Wildcarded */ 3482 device = NULL; /* Wildcarded */ 3483 3484 /* 3485 * We will potentially modify the EDT, so block interrupts 3486 * that may attempt to create cam paths. 
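 *
 * (These days the EDT is serialized with the topology and per-bus
 * mutexes taken below rather than by blocking interrupts, but the intent
 * is unchanged: the lookup and creation of the bus, target and device
 * should appear as one atomic step to other threads.)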
3487 */ 3488 bus = xpt_find_bus(path_id); 3489 if (bus == NULL) { 3490 status = CAM_PATH_INVALID; 3491 } else { 3492 xpt_lock_buses(); 3493 mtx_lock(&bus->eb_mtx); 3494 target = xpt_find_target(bus, target_id); 3495 if (target == NULL) { 3496 /* Create one */ 3497 struct cam_et *new_target; 3498 3499 new_target = xpt_alloc_target(bus, target_id); 3500 if (new_target == NULL) { 3501 status = CAM_RESRC_UNAVAIL; 3502 } else { 3503 target = new_target; 3504 } 3505 } 3506 xpt_unlock_buses(); 3507 if (target != NULL) { 3508 device = xpt_find_device(target, lun_id); 3509 if (device == NULL) { 3510 /* Create one */ 3511 struct cam_ed *new_device; 3512 3513 new_device = 3514 (*(bus->xport->ops->alloc_device))(bus, 3515 target, 3516 lun_id); 3517 if (new_device == NULL) { 3518 status = CAM_RESRC_UNAVAIL; 3519 } else { 3520 device = new_device; 3521 } 3522 } 3523 } 3524 mtx_unlock(&bus->eb_mtx); 3525 } 3526 3527 /* 3528 * Only touch the user's data if we are successful. 3529 */ 3530 if (status == CAM_REQ_CMP) { 3531 new_path->periph = perph; 3532 new_path->bus = bus; 3533 new_path->target = target; 3534 new_path->device = device; 3535 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3536 } else { 3537 if (device != NULL) 3538 xpt_release_device(device); 3539 if (target != NULL) 3540 xpt_release_target(target); 3541 if (bus != NULL) 3542 xpt_release_bus(bus); 3543 } 3544 return (status); 3545 } 3546 3547 int 3548 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3549 { 3550 struct cam_path *new_path; 3551 3552 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3553 if (new_path == NULL) 3554 return (ENOMEM); 3555 *new_path = *path; 3556 if (path->bus != NULL) 3557 xpt_acquire_bus(path->bus); 3558 if (path->target != NULL) 3559 xpt_acquire_target(path->target); 3560 if (path->device != NULL) 3561 xpt_acquire_device(path->device); 3562 *new_path_ptr = new_path; 3563 return (0); 3564 } 3565 3566 void 3567 xpt_release_path(struct cam_path *path) 3568 { 3569 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3570 if (path->device != NULL) { 3571 xpt_release_device(path->device); 3572 path->device = NULL; 3573 } 3574 if (path->target != NULL) { 3575 xpt_release_target(path->target); 3576 path->target = NULL; 3577 } 3578 if (path->bus != NULL) { 3579 xpt_release_bus(path->bus); 3580 path->bus = NULL; 3581 } 3582 } 3583 3584 void 3585 xpt_free_path(struct cam_path *path) 3586 { 3587 3588 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3589 xpt_release_path(path); 3590 free(path, M_CAMPATH); 3591 } 3592 3593 void 3594 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3595 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3596 { 3597 3598 xpt_lock_buses(); 3599 if (bus_ref) { 3600 if (path->bus) 3601 *bus_ref = path->bus->refcount; 3602 else 3603 *bus_ref = 0; 3604 } 3605 if (periph_ref) { 3606 if (path->periph) 3607 *periph_ref = path->periph->refcount; 3608 else 3609 *periph_ref = 0; 3610 } 3611 xpt_unlock_buses(); 3612 if (target_ref) { 3613 if (path->target) 3614 *target_ref = path->target->refcount; 3615 else 3616 *target_ref = 0; 3617 } 3618 if (device_ref) { 3619 if (path->device) 3620 *device_ref = path->device->refcount; 3621 else 3622 *device_ref = 0; 3623 } 3624 } 3625 3626 /* 3627 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3628 * in path1, 2 for match with wildcards in path2. 
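 *
 * For example, comparing a path that wildcards the target and LUN
 * against a fully specified path on the same bus returns 1, the reverse
 * comparison returns 2, and two different fully specified paths
 * return -1.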
3629 */ 3630 int 3631 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3632 { 3633 int retval = 0; 3634 3635 if (path1->bus != path2->bus) { 3636 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3637 retval = 1; 3638 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3639 retval = 2; 3640 else 3641 return (-1); 3642 } 3643 if (path1->target != path2->target) { 3644 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3645 if (retval == 0) 3646 retval = 1; 3647 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3648 retval = 2; 3649 else 3650 return (-1); 3651 } 3652 if (path1->device != path2->device) { 3653 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3654 if (retval == 0) 3655 retval = 1; 3656 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3657 retval = 2; 3658 else 3659 return (-1); 3660 } 3661 return (retval); 3662 } 3663 3664 int 3665 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3666 { 3667 int retval = 0; 3668 3669 if (path->bus != dev->target->bus) { 3670 if (path->bus->path_id == CAM_BUS_WILDCARD) 3671 retval = 1; 3672 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3673 retval = 2; 3674 else 3675 return (-1); 3676 } 3677 if (path->target != dev->target) { 3678 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3679 if (retval == 0) 3680 retval = 1; 3681 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3682 retval = 2; 3683 else 3684 return (-1); 3685 } 3686 if (path->device != dev) { 3687 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3688 if (retval == 0) 3689 retval = 1; 3690 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3691 retval = 2; 3692 else 3693 return (-1); 3694 } 3695 return (retval); 3696 } 3697 3698 void 3699 xpt_print_path(struct cam_path *path) 3700 { 3701 struct sbuf sb; 3702 char buffer[XPT_PRINT_LEN]; 3703 3704 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3705 xpt_path_sbuf(path, &sb); 3706 sbuf_finish(&sb); 3707 printf("%s", sbuf_data(&sb)); 3708 sbuf_delete(&sb); 3709 } 3710 3711 static void 3712 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb) 3713 { 3714 if (device == NULL) 3715 sbuf_cat(sb, "(nopath): "); 3716 else { 3717 sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ", 3718 device->sim->sim_name, 3719 device->sim->unit_number, 3720 device->sim->bus_id, 3721 device->target->target_id, 3722 (uintmax_t)device->lun_id); 3723 } 3724 } 3725 3726 void 3727 xpt_print(struct cam_path *path, const char *fmt, ...) 
3728 { 3729 va_list ap; 3730 struct sbuf sb; 3731 char buffer[XPT_PRINT_LEN]; 3732 3733 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3734 3735 xpt_path_sbuf(path, &sb); 3736 va_start(ap, fmt); 3737 sbuf_vprintf(&sb, fmt, ap); 3738 va_end(ap); 3739 3740 sbuf_finish(&sb); 3741 printf("%s", sbuf_data(&sb)); 3742 sbuf_delete(&sb); 3743 } 3744 3745 char * 3746 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3747 { 3748 struct sbuf sb; 3749 3750 sbuf_new(&sb, str, str_len, 0); 3751 xpt_path_sbuf(path, &sb); 3752 sbuf_finish(&sb); 3753 return (str); 3754 } 3755 3756 void 3757 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) 3758 { 3759 3760 if (path == NULL) 3761 sbuf_cat(sb, "(nopath): "); 3762 else { 3763 if (path->periph != NULL) 3764 sbuf_printf(sb, "(%s%d:", path->periph->periph_name, 3765 path->periph->unit_number); 3766 else 3767 sbuf_cat(sb, "(noperiph:"); 3768 3769 if (path->bus != NULL) 3770 sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, 3771 path->bus->sim->unit_number, 3772 path->bus->sim->bus_id); 3773 else 3774 sbuf_cat(sb, "nobus:"); 3775 3776 if (path->target != NULL) 3777 sbuf_printf(sb, "%d:", path->target->target_id); 3778 else 3779 sbuf_cat(sb, "X:"); 3780 3781 if (path->device != NULL) 3782 sbuf_printf(sb, "%jx): ", 3783 (uintmax_t)path->device->lun_id); 3784 else 3785 sbuf_cat(sb, "X): "); 3786 } 3787 } 3788 3789 path_id_t 3790 xpt_path_path_id(struct cam_path *path) 3791 { 3792 return(path->bus->path_id); 3793 } 3794 3795 target_id_t 3796 xpt_path_target_id(struct cam_path *path) 3797 { 3798 if (path->target != NULL) 3799 return (path->target->target_id); 3800 else 3801 return (CAM_TARGET_WILDCARD); 3802 } 3803 3804 lun_id_t 3805 xpt_path_lun_id(struct cam_path *path) 3806 { 3807 if (path->device != NULL) 3808 return (path->device->lun_id); 3809 else 3810 return (CAM_LUN_WILDCARD); 3811 } 3812 3813 struct cam_sim * 3814 xpt_path_sim(struct cam_path *path) 3815 { 3816 3817 return (path->bus->sim); 3818 } 3819 3820 struct cam_periph* 3821 xpt_path_periph(struct cam_path *path) 3822 { 3823 3824 return (path->periph); 3825 } 3826 3827 /* 3828 * Release a CAM control block for the caller. Remit the cost of the structure 3829 * to the device referenced by the path. If the this device had no 'credits' 3830 * and peripheral drivers have registered async callbacks for this notification 3831 * call them now. 3832 */ 3833 void 3834 xpt_release_ccb(union ccb *free_ccb) 3835 { 3836 struct cam_ed *device; 3837 struct cam_periph *periph; 3838 3839 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3840 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3841 device = free_ccb->ccb_h.path->device; 3842 periph = free_ccb->ccb_h.path->periph; 3843 3844 xpt_free_ccb(free_ccb); 3845 periph->periph_allocated--; 3846 cam_ccbq_release_opening(&device->ccbq); 3847 xpt_run_allocq(periph, 0); 3848 } 3849 3850 /* Functions accessed by SIM drivers */ 3851 3852 static struct xpt_xport_ops xport_default_ops = { 3853 .alloc_device = xpt_alloc_device_default, 3854 .action = xpt_action_default, 3855 .async = xpt_dev_async_default, 3856 }; 3857 static struct xpt_xport xport_default = { 3858 .xport = XPORT_UNKNOWN, 3859 .name = "unknown", 3860 .ops = &xport_default_ops, 3861 }; 3862 3863 CAM_XPT_XPORT(xport_default); 3864 3865 /* 3866 * A sim structure, listing the SIM entry points and instance 3867 * identification info is passed to xpt_bus_register to hook the SIM 3868 * into the CAM framework. 
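 *
 * A SIM driver's attach routine typically does something like the
 * following (sketch only; SIM allocation and locking are driver
 * specific, and "dev" stands for the driver's device_t):
 *
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS) {
 *		... clean up and fail the attach ...
 *	}
 *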
xpt_bus_register creates a cam_eb entry 3869 * for this new bus and places it in the array of buses and assigns 3870 * it a path_id. The path_id may be influenced by "hard wiring" 3871 * information specified by the user. Once interrupt services are 3872 * available, the bus will be probed. 3873 */ 3874 int 3875 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus) 3876 { 3877 struct cam_eb *new_bus; 3878 struct cam_eb *old_bus; 3879 struct ccb_pathinq cpi; 3880 struct cam_path *path; 3881 cam_status status; 3882 3883 sim->bus_id = bus; 3884 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3885 M_CAMXPT, M_NOWAIT|M_ZERO); 3886 if (new_bus == NULL) { 3887 /* Couldn't satisfy request */ 3888 return (ENOMEM); 3889 } 3890 3891 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3892 TAILQ_INIT(&new_bus->et_entries); 3893 cam_sim_hold(sim); 3894 new_bus->sim = sim; 3895 timevalclear(&new_bus->last_reset); 3896 new_bus->flags = 0; 3897 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3898 new_bus->generation = 0; 3899 new_bus->parent_dev = parent; 3900 3901 xpt_lock_buses(); 3902 sim->path_id = new_bus->path_id = 3903 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3904 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3905 while (old_bus != NULL 3906 && old_bus->path_id < new_bus->path_id) 3907 old_bus = TAILQ_NEXT(old_bus, links); 3908 if (old_bus != NULL) 3909 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3910 else 3911 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3912 xsoftc.bus_generation++; 3913 xpt_unlock_buses(); 3914 3915 /* 3916 * Set a default transport so that a PATH_INQ can be issued to 3917 * the SIM. This will then allow for probing and attaching of 3918 * a more appropriate transport. 3919 */ 3920 new_bus->xport = &xport_default; 3921 3922 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3923 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3924 if (status != CAM_REQ_CMP) { 3925 xpt_release_bus(new_bus); 3926 return (ENOMEM); 3927 } 3928 3929 xpt_path_inq(&cpi, path); 3930 3931 /* 3932 * Use the results of PATH_INQ to pick a transport. Note that 3933 * the xpt bus (which uses XPORT_UNSPECIFIED) always uses 3934 * xport_default instead of a transport from 3935 * cam_xpt_port_set. 3936 */ 3937 if (cam_ccb_success((union ccb *)&cpi) && 3938 cpi.transport != XPORT_UNSPECIFIED) { 3939 struct xpt_xport **xpt; 3940 3941 SET_FOREACH(xpt, cam_xpt_xport_set) { 3942 if ((*xpt)->xport == cpi.transport) { 3943 new_bus->xport = *xpt; 3944 break; 3945 } 3946 } 3947 if (new_bus->xport == &xport_default) { 3948 xpt_print(path, 3949 "No transport found for %d\n", cpi.transport); 3950 xpt_release_bus(new_bus); 3951 xpt_free_path(path); 3952 return (EINVAL); 3953 } 3954 } 3955 3956 /* Notify interested parties */ 3957 if (sim->path_id != CAM_XPT_PATH_ID) { 3958 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3959 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3960 union ccb *scan_ccb; 3961 3962 /* Initiate bus rescan. 
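 * A scan CCB (XPT_SCAN_BUS) is queued through xpt_rescan() below, so the
 * actual probe happens asynchronously once the rescan machinery gets to
 * it; SIMs that set PIM_NOSCAN in their path inquiry data skip this
 * step.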
*/ 3963 scan_ccb = xpt_alloc_ccb_nowait(); 3964 if (scan_ccb != NULL) { 3965 scan_ccb->ccb_h.path = path; 3966 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3967 scan_ccb->crcn.flags = 0; 3968 xpt_rescan(scan_ccb); 3969 } else { 3970 xpt_print(path, 3971 "Can't allocate CCB to scan bus\n"); 3972 xpt_free_path(path); 3973 } 3974 } else 3975 xpt_free_path(path); 3976 } else 3977 xpt_free_path(path); 3978 return (CAM_SUCCESS); 3979 } 3980 3981 int 3982 xpt_bus_deregister(path_id_t pathid) 3983 { 3984 struct cam_path bus_path; 3985 cam_status status; 3986 3987 status = xpt_compile_path(&bus_path, NULL, pathid, 3988 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3989 if (status != CAM_REQ_CMP) 3990 return (ENOMEM); 3991 3992 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3993 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3994 3995 /* Release the reference count held while registered. */ 3996 xpt_release_bus(bus_path.bus); 3997 xpt_release_path(&bus_path); 3998 3999 return (CAM_SUCCESS); 4000 } 4001 4002 static path_id_t 4003 xptnextfreepathid(void) 4004 { 4005 struct cam_eb *bus; 4006 path_id_t pathid; 4007 const char *strval; 4008 4009 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4010 pathid = 0; 4011 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4012 retry: 4013 /* Find an unoccupied pathid */ 4014 while (bus != NULL && bus->path_id <= pathid) { 4015 if (bus->path_id == pathid) 4016 pathid++; 4017 bus = TAILQ_NEXT(bus, links); 4018 } 4019 4020 /* 4021 * Ensure that this pathid is not reserved for 4022 * a bus that may be registered in the future. 4023 */ 4024 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4025 ++pathid; 4026 /* Start the search over */ 4027 goto retry; 4028 } 4029 return (pathid); 4030 } 4031 4032 static path_id_t 4033 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4034 { 4035 path_id_t pathid; 4036 int i, dunit, val; 4037 char buf[32]; 4038 const char *dname; 4039 4040 pathid = CAM_XPT_PATH_ID; 4041 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4042 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4043 return (pathid); 4044 i = 0; 4045 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4046 if (strcmp(dname, "scbus")) { 4047 /* Avoid a bit of foot shooting. */ 4048 continue; 4049 } 4050 if (dunit < 0) /* unwired?! */ 4051 continue; 4052 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4053 if (sim_bus == val) { 4054 pathid = dunit; 4055 break; 4056 } 4057 } else if (sim_bus == 0) { 4058 /* Unspecified matches bus 0 */ 4059 pathid = dunit; 4060 break; 4061 } else { 4062 printf( 4063 "Ambiguous scbus configuration for %s%d bus %d, cannot wire down. 
The kernel\n" 4064 "config entry for scbus%d should specify a controller bus.\n" 4065 "Scbus will be assigned dynamically.\n", 4066 sim_name, sim_unit, sim_bus, dunit); 4067 break; 4068 } 4069 } 4070 4071 if (pathid == CAM_XPT_PATH_ID) 4072 pathid = xptnextfreepathid(); 4073 return (pathid); 4074 } 4075 4076 static const char * 4077 xpt_async_string(uint32_t async_code) 4078 { 4079 4080 switch (async_code) { 4081 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4082 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4083 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4084 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4085 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4086 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4087 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4088 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4089 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4090 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4091 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4092 case AC_CONTRACT: return ("AC_CONTRACT"); 4093 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4094 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4095 } 4096 return ("AC_UNKNOWN"); 4097 } 4098 4099 static int 4100 xpt_async_size(uint32_t async_code) 4101 { 4102 4103 switch (async_code) { 4104 case AC_BUS_RESET: return (0); 4105 case AC_UNSOL_RESEL: return (0); 4106 case AC_SCSI_AEN: return (0); 4107 case AC_SENT_BDR: return (0); 4108 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4109 case AC_PATH_DEREGISTERED: return (0); 4110 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4111 case AC_LOST_DEVICE: return (0); 4112 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4113 case AC_INQ_CHANGED: return (0); 4114 case AC_GETDEV_CHANGED: return (0); 4115 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4116 case AC_ADVINFO_CHANGED: return (-1); 4117 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4118 } 4119 return (0); 4120 } 4121 4122 static int 4123 xpt_async_process_dev(struct cam_ed *device, void *arg) 4124 { 4125 union ccb *ccb = arg; 4126 struct cam_path *path = ccb->ccb_h.path; 4127 void *async_arg = ccb->casync.async_arg_ptr; 4128 uint32_t async_code = ccb->casync.async_code; 4129 bool relock; 4130 4131 if (path->device != device 4132 && path->device->lun_id != CAM_LUN_WILDCARD 4133 && device->lun_id != CAM_LUN_WILDCARD) 4134 return (1); 4135 4136 /* 4137 * The async callback could free the device. 4138 * If it is a broadcast async, it doesn't hold 4139 * device reference, so take our own reference. 4140 */ 4141 xpt_acquire_device(device); 4142 4143 /* 4144 * If async for specific device is to be delivered to 4145 * the wildcard client, take the specific device lock. 4146 * XXX: We may need a way for client to specify it. 
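 * In that case the wildcard client's device mutex is dropped and the
 * specific path's device lock is taken around the callbacks; the 'relock'
 * flag below restores the original lock before returning.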
4147 */ 4148 if ((device->lun_id == CAM_LUN_WILDCARD && 4149 path->device->lun_id != CAM_LUN_WILDCARD) || 4150 (device->target->target_id == CAM_TARGET_WILDCARD && 4151 path->target->target_id != CAM_TARGET_WILDCARD) || 4152 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4153 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4154 mtx_unlock(&device->device_mtx); 4155 xpt_path_lock(path); 4156 relock = true; 4157 } else 4158 relock = false; 4159 4160 (*(device->target->bus->xport->ops->async))(async_code, 4161 device->target->bus, device->target, device, async_arg); 4162 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4163 4164 if (relock) { 4165 xpt_path_unlock(path); 4166 mtx_lock(&device->device_mtx); 4167 } 4168 xpt_release_device(device); 4169 return (1); 4170 } 4171 4172 static int 4173 xpt_async_process_tgt(struct cam_et *target, void *arg) 4174 { 4175 union ccb *ccb = arg; 4176 struct cam_path *path = ccb->ccb_h.path; 4177 4178 if (path->target != target 4179 && path->target->target_id != CAM_TARGET_WILDCARD 4180 && target->target_id != CAM_TARGET_WILDCARD) 4181 return (1); 4182 4183 if (ccb->casync.async_code == AC_SENT_BDR) { 4184 /* Update our notion of when the last reset occurred */ 4185 microtime(&target->last_reset); 4186 } 4187 4188 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4189 } 4190 4191 static void 4192 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4193 { 4194 struct cam_eb *bus; 4195 struct cam_path *path; 4196 void *async_arg; 4197 uint32_t async_code; 4198 4199 path = ccb->ccb_h.path; 4200 async_code = ccb->casync.async_code; 4201 async_arg = ccb->casync.async_arg_ptr; 4202 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4203 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4204 bus = path->bus; 4205 4206 if (async_code == AC_BUS_RESET) { 4207 /* Update our notion of when the last reset occurred */ 4208 microtime(&bus->last_reset); 4209 } 4210 4211 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4212 4213 /* 4214 * If this wasn't a fully wildcarded async, tell all 4215 * clients that want all async events. 4216 */ 4217 if (bus != xpt_periph->path->bus) { 4218 xpt_path_lock(xpt_periph->path); 4219 xpt_async_process_dev(xpt_periph->path->device, ccb); 4220 xpt_path_unlock(xpt_periph->path); 4221 } 4222 4223 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4224 xpt_release_devq(path, 1, TRUE); 4225 else 4226 xpt_release_simq(path->bus->sim, TRUE); 4227 if (ccb->casync.async_arg_size > 0) 4228 free(async_arg, M_CAMXPT); 4229 xpt_free_path(path); 4230 xpt_free_ccb(ccb); 4231 } 4232 4233 static void 4234 xpt_async_bcast(struct async_list *async_head, 4235 uint32_t async_code, 4236 struct cam_path *path, void *async_arg) 4237 { 4238 struct async_node *cur_entry; 4239 struct mtx *mtx; 4240 4241 cur_entry = SLIST_FIRST(async_head); 4242 while (cur_entry != NULL) { 4243 struct async_node *next_entry; 4244 /* 4245 * Grab the next list entry before we call the current 4246 * entry's callback. This is because the callback function 4247 * can delete its async callback entry. 4248 */ 4249 next_entry = SLIST_NEXT(cur_entry, links); 4250 if ((cur_entry->event_enable & async_code) != 0) { 4251 mtx = cur_entry->event_lock ? 
4252 path->device->sim->mtx : NULL;
4253 if (mtx)
4254 mtx_lock(mtx);
4255 cur_entry->callback(cur_entry->callback_arg,
4256 async_code, path,
4257 async_arg);
4258 if (mtx)
4259 mtx_unlock(mtx);
4260 }
4261 cur_entry = next_entry;
4262 }
4263 }
4264
4265 void
4266 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg)
4267 {
4268 union ccb *ccb;
4269 int size;
4270
4271 ccb = xpt_alloc_ccb_nowait();
4272 if (ccb == NULL) {
4273 xpt_print(path, "Can't allocate CCB to send %s\n",
4274 xpt_async_string(async_code));
4275 return;
4276 }
4277
4278 if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) {
4279 xpt_print(path, "Can't allocate path to send %s\n",
4280 xpt_async_string(async_code));
4281 xpt_free_ccb(ccb);
4282 return;
4283 }
4284 ccb->ccb_h.path->periph = NULL;
4285 ccb->ccb_h.func_code = XPT_ASYNC;
4286 ccb->ccb_h.cbfcnp = xpt_async_process;
4287 ccb->ccb_h.flags |= CAM_UNLOCKED;
4288 ccb->casync.async_code = async_code;
4289 ccb->casync.async_arg_size = 0;
4290 size = xpt_async_size(async_code);
4291 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
4292 ("xpt_async: func %#x %s async_code %d %s\n",
4293 ccb->ccb_h.func_code,
4294 xpt_action_name(ccb->ccb_h.func_code),
4295 async_code,
4296 xpt_async_string(async_code)));
4297 if (size > 0 && async_arg != NULL) {
4298 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT);
4299 if (ccb->casync.async_arg_ptr == NULL) {
4300 xpt_print(path, "Can't allocate argument to send %s\n",
4301 xpt_async_string(async_code));
4302 xpt_free_path(ccb->ccb_h.path);
4303 xpt_free_ccb(ccb);
4304 return;
4305 }
4306 memcpy(ccb->casync.async_arg_ptr, async_arg, size);
4307 ccb->casync.async_arg_size = size;
4308 } else if (size < 0) {
4309 ccb->casync.async_arg_ptr = async_arg;
4310 ccb->casync.async_arg_size = size;
4311 }
4312 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD)
4313 xpt_freeze_devq(path, 1);
4314 else
4315 xpt_freeze_simq(path->bus->sim, 1);
4316 xpt_action(ccb);
4317 }
4318
4319 static void
4320 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus,
4321 struct cam_et *target, struct cam_ed *device,
4322 void *async_arg)
4323 {
4324
4325 /*
4326 * We only need to handle events for real devices.
4327 */
4328 if (target->target_id == CAM_TARGET_WILDCARD
4329 || device->lun_id == CAM_LUN_WILDCARD)
4330 return;
4331
4332 printf("%s called\n", __func__);
4333 }
4334
4335 static uint32_t
4336 xpt_freeze_devq_device(struct cam_ed *dev, u_int count)
4337 {
4338 struct cam_devq *devq;
4339 uint32_t freeze;
4340
4341 devq = dev->sim->devq;
4342 mtx_assert(&devq->send_mtx, MA_OWNED);
4343 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE,
4344 ("xpt_freeze_devq_device(%d) %u->%u\n", count,
4345 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
4346 freeze = (dev->ccbq.queue.qfrozen_cnt += count);
4347 /* Remove frozen device from sendq.
*/ 4348 if (device_is_queued(dev)) 4349 camq_remove(&devq->send_queue, dev->devq_entry.index); 4350 return (freeze); 4351 } 4352 4353 uint32_t 4354 xpt_freeze_devq(struct cam_path *path, u_int count) 4355 { 4356 struct cam_ed *dev = path->device; 4357 struct cam_devq *devq; 4358 uint32_t freeze; 4359 4360 devq = dev->sim->devq; 4361 mtx_lock(&devq->send_mtx); 4362 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4363 freeze = xpt_freeze_devq_device(dev, count); 4364 mtx_unlock(&devq->send_mtx); 4365 return (freeze); 4366 } 4367 4368 uint32_t 4369 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4370 { 4371 struct cam_devq *devq; 4372 uint32_t freeze; 4373 4374 devq = sim->devq; 4375 mtx_lock(&devq->send_mtx); 4376 freeze = (devq->send_queue.qfrozen_cnt += count); 4377 mtx_unlock(&devq->send_mtx); 4378 return (freeze); 4379 } 4380 4381 static void 4382 xpt_release_devq_timeout(void *arg) 4383 { 4384 struct cam_ed *dev; 4385 struct cam_devq *devq; 4386 4387 dev = (struct cam_ed *)arg; 4388 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4389 devq = dev->sim->devq; 4390 mtx_assert(&devq->send_mtx, MA_OWNED); 4391 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4392 xpt_run_devq(devq); 4393 } 4394 4395 void 4396 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4397 { 4398 struct cam_ed *dev; 4399 struct cam_devq *devq; 4400 4401 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4402 count, run_queue)); 4403 dev = path->device; 4404 devq = dev->sim->devq; 4405 mtx_lock(&devq->send_mtx); 4406 if (xpt_release_devq_device(dev, count, run_queue)) 4407 xpt_run_devq(dev->sim->devq); 4408 mtx_unlock(&devq->send_mtx); 4409 } 4410 4411 static int 4412 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4413 { 4414 4415 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4416 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4417 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4418 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4419 if (count > dev->ccbq.queue.qfrozen_cnt) { 4420 #ifdef INVARIANTS 4421 printf("xpt_release_devq(): requested %u > present %u\n", 4422 count, dev->ccbq.queue.qfrozen_cnt); 4423 #endif 4424 count = dev->ccbq.queue.qfrozen_cnt; 4425 } 4426 dev->ccbq.queue.qfrozen_cnt -= count; 4427 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4428 /* 4429 * No longer need to wait for a successful 4430 * command completion. 4431 */ 4432 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4433 /* 4434 * Remove any timeouts that might be scheduled 4435 * to release this queue. 4436 */ 4437 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4438 callout_stop(&dev->callout); 4439 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4440 } 4441 /* 4442 * Now that we are unfrozen schedule the 4443 * device so any pending transactions are 4444 * run. 4445 */ 4446 xpt_schedule_devq(dev->sim->devq, dev); 4447 } else 4448 run_queue = 0; 4449 return (run_queue); 4450 } 4451 4452 void 4453 xpt_release_simq(struct cam_sim *sim, int run_queue) 4454 { 4455 struct cam_devq *devq; 4456 4457 devq = sim->devq; 4458 mtx_lock(&devq->send_mtx); 4459 if (devq->send_queue.qfrozen_cnt <= 0) { 4460 #ifdef INVARIANTS 4461 printf("xpt_release_simq: requested 1 > present %u\n", 4462 devq->send_queue.qfrozen_cnt); 4463 #endif 4464 } else 4465 devq->send_queue.qfrozen_cnt--; 4466 if (devq->send_queue.qfrozen_cnt == 0) { 4467 if (run_queue) { 4468 /* 4469 * Now that we are unfrozen run the send queue. 
4470 */ 4471 xpt_run_devq(sim->devq); 4472 } 4473 } 4474 mtx_unlock(&devq->send_mtx); 4475 } 4476 4477 void 4478 xpt_done(union ccb *done_ccb) 4479 { 4480 struct cam_doneq *queue; 4481 int run, hash; 4482 4483 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4484 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4485 done_ccb->csio.bio != NULL) 4486 biotrack(done_ccb->csio.bio, __func__); 4487 #endif 4488 4489 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4490 ("xpt_done: func= %#x %s status %#x\n", 4491 done_ccb->ccb_h.func_code, 4492 xpt_action_name(done_ccb->ccb_h.func_code), 4493 done_ccb->ccb_h.status)); 4494 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4495 return; 4496 4497 /* Store the time the ccb was in the sim */ 4498 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4499 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4500 hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4501 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4502 queue = &cam_doneqs[hash]; 4503 mtx_lock(&queue->cam_doneq_mtx); 4504 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4505 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4506 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4507 mtx_unlock(&queue->cam_doneq_mtx); 4508 if (run && !dumping) 4509 wakeup(&queue->cam_doneq); 4510 } 4511 4512 void 4513 xpt_done_direct(union ccb *done_ccb) 4514 { 4515 4516 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4517 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4518 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4519 return; 4520 4521 /* Store the time the ccb was in the sim */ 4522 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4523 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4524 xpt_done_process(&done_ccb->ccb_h); 4525 } 4526 4527 union ccb * 4528 xpt_alloc_ccb(void) 4529 { 4530 union ccb *new_ccb; 4531 4532 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4533 return (new_ccb); 4534 } 4535 4536 union ccb * 4537 xpt_alloc_ccb_nowait(void) 4538 { 4539 union ccb *new_ccb; 4540 4541 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4542 return (new_ccb); 4543 } 4544 4545 void 4546 xpt_free_ccb(union ccb *free_ccb) 4547 { 4548 struct cam_periph *periph; 4549 4550 if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) { 4551 /* 4552 * Looks like a CCB allocated from a periph UMA zone. 4553 */ 4554 periph = free_ccb->ccb_h.path->periph; 4555 uma_zfree(periph->ccb_zone, free_ccb); 4556 } else { 4557 free(free_ccb, M_CAMCCB); 4558 } 4559 } 4560 4561 /* Private XPT functions */ 4562 4563 /* 4564 * Get a CAM control block for the caller. Charge the structure to the device 4565 * referenced by the path. If we don't have sufficient resources to allocate 4566 * more ccbs, we return NULL. 
4567 */ 4568 static union ccb * 4569 xpt_get_ccb_nowait(struct cam_periph *periph) 4570 { 4571 union ccb *new_ccb; 4572 int alloc_flags; 4573 4574 if (periph->ccb_zone != NULL) { 4575 alloc_flags = CAM_CCB_FROM_UMA; 4576 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT); 4577 } else { 4578 alloc_flags = 0; 4579 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4580 } 4581 if (new_ccb == NULL) 4582 return (NULL); 4583 new_ccb->ccb_h.alloc_flags = alloc_flags; 4584 periph->periph_allocated++; 4585 cam_ccbq_take_opening(&periph->path->device->ccbq); 4586 return (new_ccb); 4587 } 4588 4589 static union ccb * 4590 xpt_get_ccb(struct cam_periph *periph) 4591 { 4592 union ccb *new_ccb; 4593 int alloc_flags; 4594 4595 cam_periph_unlock(periph); 4596 if (periph->ccb_zone != NULL) { 4597 alloc_flags = CAM_CCB_FROM_UMA; 4598 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK); 4599 } else { 4600 alloc_flags = 0; 4601 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4602 } 4603 new_ccb->ccb_h.alloc_flags = alloc_flags; 4604 cam_periph_lock(periph); 4605 periph->periph_allocated++; 4606 cam_ccbq_take_opening(&periph->path->device->ccbq); 4607 return (new_ccb); 4608 } 4609 4610 union ccb * 4611 cam_periph_getccb(struct cam_periph *periph, uint32_t priority) 4612 { 4613 struct ccb_hdr *ccb_h; 4614 4615 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4616 cam_periph_assert(periph, MA_OWNED); 4617 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4618 ccb_h->pinfo.priority != priority) { 4619 if (priority < periph->immediate_priority) { 4620 periph->immediate_priority = priority; 4621 xpt_run_allocq(periph, 0); 4622 } else 4623 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4624 "cgticb", 0); 4625 } 4626 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4627 return ((union ccb *)ccb_h); 4628 } 4629 4630 static void 4631 xpt_acquire_bus(struct cam_eb *bus) 4632 { 4633 4634 xpt_lock_buses(); 4635 bus->refcount++; 4636 xpt_unlock_buses(); 4637 } 4638 4639 static void 4640 xpt_release_bus(struct cam_eb *bus) 4641 { 4642 4643 xpt_lock_buses(); 4644 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4645 if (--bus->refcount > 0) { 4646 xpt_unlock_buses(); 4647 return; 4648 } 4649 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4650 xsoftc.bus_generation++; 4651 xpt_unlock_buses(); 4652 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4653 ("destroying bus, but target list is not empty")); 4654 cam_sim_release(bus->sim); 4655 mtx_destroy(&bus->eb_mtx); 4656 free(bus, M_CAMXPT); 4657 } 4658 4659 static struct cam_et * 4660 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4661 { 4662 struct cam_et *cur_target, *target; 4663 4664 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4665 mtx_assert(&bus->eb_mtx, MA_OWNED); 4666 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4667 M_NOWAIT|M_ZERO); 4668 if (target == NULL) 4669 return (NULL); 4670 4671 TAILQ_INIT(&target->ed_entries); 4672 target->bus = bus; 4673 target->target_id = target_id; 4674 target->refcount = 1; 4675 target->generation = 0; 4676 target->luns = NULL; 4677 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4678 timevalclear(&target->last_reset); 4679 /* 4680 * Hold a reference to our parent bus so it 4681 * will not go away before we do. 
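 * The reference is dropped again by xpt_release_target(), which calls
 * xpt_release_bus() once the last reference to this target goes away.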
4682 */ 4683 bus->refcount++; 4684 4685 /* Insertion sort into our bus's target list */ 4686 cur_target = TAILQ_FIRST(&bus->et_entries); 4687 while (cur_target != NULL && cur_target->target_id < target_id) 4688 cur_target = TAILQ_NEXT(cur_target, links); 4689 if (cur_target != NULL) { 4690 TAILQ_INSERT_BEFORE(cur_target, target, links); 4691 } else { 4692 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4693 } 4694 bus->generation++; 4695 return (target); 4696 } 4697 4698 static void 4699 xpt_acquire_target(struct cam_et *target) 4700 { 4701 struct cam_eb *bus = target->bus; 4702 4703 mtx_lock(&bus->eb_mtx); 4704 target->refcount++; 4705 mtx_unlock(&bus->eb_mtx); 4706 } 4707 4708 static void 4709 xpt_release_target(struct cam_et *target) 4710 { 4711 struct cam_eb *bus = target->bus; 4712 4713 mtx_lock(&bus->eb_mtx); 4714 if (--target->refcount > 0) { 4715 mtx_unlock(&bus->eb_mtx); 4716 return; 4717 } 4718 TAILQ_REMOVE(&bus->et_entries, target, links); 4719 bus->generation++; 4720 mtx_unlock(&bus->eb_mtx); 4721 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4722 ("destroying target, but device list is not empty")); 4723 xpt_release_bus(bus); 4724 mtx_destroy(&target->luns_mtx); 4725 if (target->luns) 4726 free(target->luns, M_CAMXPT); 4727 free(target, M_CAMXPT); 4728 } 4729 4730 static struct cam_ed * 4731 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4732 lun_id_t lun_id) 4733 { 4734 struct cam_ed *device; 4735 4736 device = xpt_alloc_device(bus, target, lun_id); 4737 if (device == NULL) 4738 return (NULL); 4739 4740 device->mintags = 1; 4741 device->maxtags = 1; 4742 return (device); 4743 } 4744 4745 static void 4746 xpt_destroy_device(void *context, int pending) 4747 { 4748 struct cam_ed *device = context; 4749 4750 mtx_lock(&device->device_mtx); 4751 mtx_destroy(&device->device_mtx); 4752 free(device, M_CAMDEV); 4753 } 4754 4755 struct cam_ed * 4756 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4757 { 4758 struct cam_ed *cur_device, *device; 4759 struct cam_devq *devq; 4760 cam_status status; 4761 4762 mtx_assert(&bus->eb_mtx, MA_OWNED); 4763 /* Make space for us in the device queue on our bus */ 4764 devq = bus->sim->devq; 4765 mtx_lock(&devq->send_mtx); 4766 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4767 mtx_unlock(&devq->send_mtx); 4768 if (status != CAM_REQ_CMP) 4769 return (NULL); 4770 4771 device = (struct cam_ed *)malloc(sizeof(*device), 4772 M_CAMDEV, M_NOWAIT|M_ZERO); 4773 if (device == NULL) 4774 return (NULL); 4775 4776 cam_init_pinfo(&device->devq_entry); 4777 device->target = target; 4778 device->lun_id = lun_id; 4779 device->sim = bus->sim; 4780 if (cam_ccbq_init(&device->ccbq, 4781 bus->sim->max_dev_openings) != 0) { 4782 free(device, M_CAMDEV); 4783 return (NULL); 4784 } 4785 SLIST_INIT(&device->asyncs); 4786 SLIST_INIT(&device->periphs); 4787 device->generation = 0; 4788 device->flags = CAM_DEV_UNCONFIGURED; 4789 device->tag_delay_count = 0; 4790 device->tag_saved_openings = 0; 4791 device->refcount = 1; 4792 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4793 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4794 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4795 /* 4796 * Hold a reference to our parent bus so it 4797 * will not go away before we do. 
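 * (Strictly, the reference taken below is on the parent target; the
 * target in turn holds a reference on the bus, keeping both alive.)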
4798 */ 4799 target->refcount++; 4800 4801 cur_device = TAILQ_FIRST(&target->ed_entries); 4802 while (cur_device != NULL && cur_device->lun_id < lun_id) 4803 cur_device = TAILQ_NEXT(cur_device, links); 4804 if (cur_device != NULL) 4805 TAILQ_INSERT_BEFORE(cur_device, device, links); 4806 else 4807 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4808 target->generation++; 4809 return (device); 4810 } 4811 4812 void 4813 xpt_acquire_device(struct cam_ed *device) 4814 { 4815 struct cam_eb *bus = device->target->bus; 4816 4817 mtx_lock(&bus->eb_mtx); 4818 device->refcount++; 4819 mtx_unlock(&bus->eb_mtx); 4820 } 4821 4822 void 4823 xpt_release_device(struct cam_ed *device) 4824 { 4825 struct cam_eb *bus = device->target->bus; 4826 struct cam_devq *devq; 4827 4828 mtx_lock(&bus->eb_mtx); 4829 if (--device->refcount > 0) { 4830 mtx_unlock(&bus->eb_mtx); 4831 return; 4832 } 4833 4834 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4835 device->target->generation++; 4836 mtx_unlock(&bus->eb_mtx); 4837 4838 /* Release our slot in the devq */ 4839 devq = bus->sim->devq; 4840 mtx_lock(&devq->send_mtx); 4841 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4842 4843 KASSERT(SLIST_EMPTY(&device->periphs), 4844 ("destroying device, but periphs list is not empty")); 4845 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4846 ("destroying device while still queued for ccbs")); 4847 4848 /* The send_mtx must be held when accessing the callout */ 4849 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4850 callout_stop(&device->callout); 4851 4852 mtx_unlock(&devq->send_mtx); 4853 4854 xpt_release_target(device->target); 4855 4856 cam_ccbq_fini(&device->ccbq); 4857 /* 4858 * Free allocated memory. free(9) does nothing if the 4859 * supplied pointer is NULL, so it is safe to call without 4860 * checking. 
4861 */ 4862 free(device->supported_vpds, M_CAMXPT); 4863 free(device->device_id, M_CAMXPT); 4864 free(device->ext_inq, M_CAMXPT); 4865 free(device->physpath, M_CAMXPT); 4866 free(device->rcap_buf, M_CAMXPT); 4867 free(device->serial_num, M_CAMXPT); 4868 free(device->nvme_data, M_CAMXPT); 4869 free(device->nvme_cdata, M_CAMXPT); 4870 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4871 } 4872 4873 uint32_t 4874 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4875 { 4876 int result; 4877 struct cam_ed *dev; 4878 4879 dev = path->device; 4880 mtx_lock(&dev->sim->devq->send_mtx); 4881 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4882 mtx_unlock(&dev->sim->devq->send_mtx); 4883 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4884 || (dev->inq_flags & SID_CmdQue) != 0) 4885 dev->tag_saved_openings = newopenings; 4886 return (result); 4887 } 4888 4889 static struct cam_eb * 4890 xpt_find_bus(path_id_t path_id) 4891 { 4892 struct cam_eb *bus; 4893 4894 xpt_lock_buses(); 4895 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4896 bus != NULL; 4897 bus = TAILQ_NEXT(bus, links)) { 4898 if (bus->path_id == path_id) { 4899 bus->refcount++; 4900 break; 4901 } 4902 } 4903 xpt_unlock_buses(); 4904 return (bus); 4905 } 4906 4907 static struct cam_et * 4908 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4909 { 4910 struct cam_et *target; 4911 4912 mtx_assert(&bus->eb_mtx, MA_OWNED); 4913 for (target = TAILQ_FIRST(&bus->et_entries); 4914 target != NULL; 4915 target = TAILQ_NEXT(target, links)) { 4916 if (target->target_id == target_id) { 4917 target->refcount++; 4918 break; 4919 } 4920 } 4921 return (target); 4922 } 4923 4924 static struct cam_ed * 4925 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4926 { 4927 struct cam_ed *device; 4928 4929 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4930 for (device = TAILQ_FIRST(&target->ed_entries); 4931 device != NULL; 4932 device = TAILQ_NEXT(device, links)) { 4933 if (device->lun_id == lun_id) { 4934 device->refcount++; 4935 break; 4936 } 4937 } 4938 return (device); 4939 } 4940 4941 void 4942 xpt_start_tags(struct cam_path *path) 4943 { 4944 struct ccb_relsim crs; 4945 struct cam_ed *device; 4946 struct cam_sim *sim; 4947 int newopenings; 4948 4949 device = path->device; 4950 sim = path->bus->sim; 4951 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4952 xpt_freeze_devq(path, /*count*/1); 4953 device->inq_flags |= SID_CmdQue; 4954 if (device->tag_saved_openings != 0) 4955 newopenings = device->tag_saved_openings; 4956 else 4957 newopenings = min(device->maxtags, 4958 sim->max_tagged_dev_openings); 4959 xpt_dev_ccbq_resize(path, newopenings); 4960 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4961 memset(&crs, 0, sizeof(crs)); 4962 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4963 crs.ccb_h.func_code = XPT_REL_SIMQ; 4964 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4965 crs.openings 4966 = crs.release_timeout 4967 = crs.qfrozen_cnt 4968 = 0; 4969 xpt_action((union ccb *)&crs); 4970 } 4971 4972 void 4973 xpt_stop_tags(struct cam_path *path) 4974 { 4975 struct ccb_relsim crs; 4976 struct cam_ed *device; 4977 struct cam_sim *sim; 4978 4979 device = path->device; 4980 sim = path->bus->sim; 4981 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4982 device->tag_delay_count = 0; 4983 xpt_freeze_devq(path, /*count*/1); 4984 device->inq_flags &= ~SID_CmdQue; 4985 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4986 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4987 memset(&crs, 0, sizeof(crs)); 4988 xpt_setup_ccb(&crs.ccb_h, 
path, CAM_PRIORITY_NORMAL); 4989 crs.ccb_h.func_code = XPT_REL_SIMQ; 4990 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4991 crs.openings 4992 = crs.release_timeout 4993 = crs.qfrozen_cnt 4994 = 0; 4995 xpt_action((union ccb *)&crs); 4996 } 4997 4998 /* 4999 * Assume all possible buses are detected by this time, so allow boot 5000 * as soon as they all are scanned. 5001 */ 5002 static void 5003 xpt_boot_delay(void *arg) 5004 { 5005 5006 xpt_release_boot(); 5007 } 5008 5009 /* 5010 * Now that all config hooks have completed, start boot_delay timer, 5011 * waiting for possibly still undetected buses (USB) to appear. 5012 */ 5013 static void 5014 xpt_ch_done(void *arg) 5015 { 5016 5017 callout_init(&xsoftc.boot_callout, 1); 5018 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 5019 SBT_1MS, xpt_boot_delay, NULL, 0); 5020 } 5021 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL); 5022 5023 /* 5024 * Now that interrupts are enabled, go find our devices 5025 */ 5026 static void 5027 xpt_config(void *arg) 5028 { 5029 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 5030 printf("xpt_config: failed to create taskqueue thread.\n"); 5031 5032 /* Setup debugging path */ 5033 if (cam_dflags != CAM_DEBUG_NONE) { 5034 if (xpt_create_path(&cam_dpath, NULL, 5035 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5036 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5037 printf( 5038 "xpt_config: xpt_create_path() failed for debug target %d:%d:%d, debugging disabled\n", 5039 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5040 cam_dflags = CAM_DEBUG_NONE; 5041 } 5042 } else 5043 cam_dpath = NULL; 5044 5045 periphdriver_init(1); 5046 xpt_hold_boot(); 5047 5048 /* Fire up rescan thread. */ 5049 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5050 "cam", "scanner")) { 5051 printf("xpt_config: failed to create rescan thread.\n"); 5052 } 5053 } 5054 5055 void 5056 xpt_hold_boot_locked(void) 5057 { 5058 5059 if (xsoftc.buses_to_config++ == 0) 5060 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount); 5061 } 5062 5063 void 5064 xpt_hold_boot(void) 5065 { 5066 5067 xpt_lock_buses(); 5068 xpt_hold_boot_locked(); 5069 xpt_unlock_buses(); 5070 } 5071 5072 void 5073 xpt_release_boot(void) 5074 { 5075 5076 xpt_lock_buses(); 5077 if (--xsoftc.buses_to_config == 0) { 5078 if (xsoftc.buses_config_done == 0) { 5079 xsoftc.buses_config_done = 1; 5080 xsoftc.buses_to_config++; 5081 TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task, 5082 NULL); 5083 taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task); 5084 } else 5085 root_mount_rel(&xsoftc.xpt_rootmount); 5086 } 5087 xpt_unlock_buses(); 5088 } 5089 5090 /* 5091 * If the given device only has one peripheral attached to it, and if that 5092 * peripheral is the passthrough driver, announce it. This insures that the 5093 * user sees some sort of announcement for every peripheral in their system. 
5094 */ 5095 static int 5096 xptpassannouncefunc(struct cam_ed *device, void *arg) 5097 { 5098 struct cam_periph *periph; 5099 int i; 5100 5101 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5102 periph = SLIST_NEXT(periph, periph_links), i++); 5103 5104 periph = SLIST_FIRST(&device->periphs); 5105 if ((i == 1) 5106 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5107 xpt_announce_periph(periph, NULL); 5108 5109 return(1); 5110 } 5111 5112 static void 5113 xpt_finishconfig_task(void *context, int pending) 5114 { 5115 5116 periphdriver_init(2); 5117 /* 5118 * Check for devices with no "standard" peripheral driver 5119 * attached. For any devices like that, announce the 5120 * passthrough driver so the user will see something. 5121 */ 5122 if (!bootverbose) 5123 xpt_for_all_devices(xptpassannouncefunc, NULL); 5124 5125 xpt_release_boot(); 5126 } 5127 5128 cam_status 5129 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5130 struct cam_path *path) 5131 { 5132 struct ccb_setasync csa; 5133 cam_status status; 5134 bool xptpath = false; 5135 5136 if (path == NULL) { 5137 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5138 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5139 if (status != CAM_REQ_CMP) 5140 return (status); 5141 xpt_path_lock(path); 5142 xptpath = true; 5143 } 5144 5145 memset(&csa, 0, sizeof(csa)); 5146 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5147 csa.ccb_h.func_code = XPT_SASYNC_CB; 5148 csa.event_enable = event; 5149 csa.callback = cbfunc; 5150 csa.callback_arg = cbarg; 5151 xpt_action((union ccb *)&csa); 5152 status = csa.ccb_h.status; 5153 5154 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5155 ("xpt_register_async: func %p\n", cbfunc)); 5156 5157 if (xptpath) { 5158 xpt_path_unlock(path); 5159 xpt_free_path(path); 5160 } 5161 5162 if ((status == CAM_REQ_CMP) && 5163 (csa.event_enable & AC_FOUND_DEVICE)) { 5164 /* 5165 * Get this peripheral up to date with all 5166 * the currently existing devices. 5167 */ 5168 xpt_for_all_devices(xptsetasyncfunc, &csa); 5169 } 5170 if ((status == CAM_REQ_CMP) && 5171 (csa.event_enable & AC_PATH_REGISTERED)) { 5172 /* 5173 * Get this peripheral up to date with all 5174 * the currently existing buses. 5175 */ 5176 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5177 } 5178 5179 return (status); 5180 } 5181 5182 static void 5183 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5184 { 5185 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5186 5187 switch (work_ccb->ccb_h.func_code) { 5188 /* Common cases first */ 5189 case XPT_PATH_INQ: /* Path routing inquiry */ 5190 { 5191 struct ccb_pathinq *cpi; 5192 5193 cpi = &work_ccb->cpi; 5194 cpi->version_num = 1; /* XXX??? 
*/ 5195 cpi->hba_inquiry = 0; 5196 cpi->target_sprt = 0; 5197 cpi->hba_misc = 0; 5198 cpi->hba_eng_cnt = 0; 5199 cpi->max_target = 0; 5200 cpi->max_lun = 0; 5201 cpi->initiator_id = 0; 5202 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5203 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5204 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5205 cpi->unit_number = sim->unit_number; 5206 cpi->bus_id = sim->bus_id; 5207 cpi->base_transfer_speed = 0; 5208 cpi->protocol = PROTO_UNSPECIFIED; 5209 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5210 cpi->transport = XPORT_UNSPECIFIED; 5211 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5212 cpi->ccb_h.status = CAM_REQ_CMP; 5213 break; 5214 } 5215 default: 5216 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5217 break; 5218 } 5219 xpt_done(work_ccb); 5220 } 5221 5222 /* 5223 * The xpt as a "controller" has no interrupt sources, so polling 5224 * is a no-op. 5225 */ 5226 static void 5227 xptpoll(struct cam_sim *sim) 5228 { 5229 } 5230 5231 void 5232 xpt_lock_buses(void) 5233 { 5234 mtx_lock(&xsoftc.xpt_topo_lock); 5235 } 5236 5237 void 5238 xpt_unlock_buses(void) 5239 { 5240 mtx_unlock(&xsoftc.xpt_topo_lock); 5241 } 5242 5243 struct mtx * 5244 xpt_path_mtx(struct cam_path *path) 5245 { 5246 5247 return (&path->device->device_mtx); 5248 } 5249 5250 static void 5251 xpt_done_process(struct ccb_hdr *ccb_h) 5252 { 5253 struct cam_sim *sim = NULL; 5254 struct cam_devq *devq = NULL; 5255 struct mtx *mtx = NULL; 5256 5257 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5258 struct ccb_scsiio *csio; 5259 5260 if (ccb_h->func_code == XPT_SCSI_IO) { 5261 csio = &((union ccb *)ccb_h)->csio; 5262 if (csio->bio != NULL) 5263 biotrack(csio->bio, __func__); 5264 } 5265 #endif 5266 5267 if (ccb_h->flags & CAM_HIGH_POWER) { 5268 struct highpowerlist *hphead; 5269 struct cam_ed *device; 5270 5271 mtx_lock(&xsoftc.xpt_highpower_lock); 5272 hphead = &xsoftc.highpowerq; 5273 5274 device = STAILQ_FIRST(hphead); 5275 5276 /* 5277 * Increment the count since this command is done. 5278 */ 5279 xsoftc.num_highpower++; 5280 5281 /* 5282 * Any high powered commands queued up? 5283 */ 5284 if (device != NULL) { 5285 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5286 mtx_unlock(&xsoftc.xpt_highpower_lock); 5287 5288 mtx_lock(&device->sim->devq->send_mtx); 5289 xpt_release_devq_device(device, 5290 /*count*/1, /*runqueue*/TRUE); 5291 mtx_unlock(&device->sim->devq->send_mtx); 5292 } else 5293 mtx_unlock(&xsoftc.xpt_highpower_lock); 5294 } 5295 5296 /* 5297 * Insulate against a race where the periph is destroyed but CCBs are 5298 * still not all processed. This shouldn't happen, but allows us better 5299 * bug diagnostic when it does. 
5300 */ 5301 if (ccb_h->path->bus) 5302 sim = ccb_h->path->bus->sim; 5303 5304 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5305 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); 5306 xpt_release_simq(sim, /*run_queue*/FALSE); 5307 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5308 } 5309 5310 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5311 && (ccb_h->status & CAM_DEV_QFRZN)) { 5312 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5313 ccb_h->status &= ~CAM_DEV_QFRZN; 5314 } 5315 5316 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5317 struct cam_ed *dev = ccb_h->path->device; 5318 5319 if (sim) 5320 devq = sim->devq; 5321 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.", 5322 ccb_h, xpt_action_name(ccb_h->func_code))); 5323 5324 mtx_lock(&devq->send_mtx); 5325 devq->send_active--; 5326 devq->send_openings++; 5327 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5328 5329 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5330 && (dev->ccbq.dev_active == 0))) { 5331 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5332 xpt_release_devq_device(dev, /*count*/1, 5333 /*run_queue*/FALSE); 5334 } 5335 5336 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5337 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5338 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5339 xpt_release_devq_device(dev, /*count*/1, 5340 /*run_queue*/FALSE); 5341 } 5342 5343 if (!device_is_queued(dev)) 5344 (void)xpt_schedule_devq(devq, dev); 5345 xpt_run_devq(devq); 5346 mtx_unlock(&devq->send_mtx); 5347 5348 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5349 mtx = xpt_path_mtx(ccb_h->path); 5350 mtx_lock(mtx); 5351 5352 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5353 && (--dev->tag_delay_count == 0)) 5354 xpt_start_tags(ccb_h->path); 5355 } 5356 } 5357 5358 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5359 if (mtx == NULL) { 5360 mtx = xpt_path_mtx(ccb_h->path); 5361 mtx_lock(mtx); 5362 } 5363 } else { 5364 if (mtx != NULL) { 5365 mtx_unlock(mtx); 5366 mtx = NULL; 5367 } 5368 } 5369 5370 /* Call the peripheral driver's callback */ 5371 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5372 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5373 if (mtx != NULL) 5374 mtx_unlock(mtx); 5375 } 5376 5377 /* 5378 * Parameterize instead and use xpt_done_td? 
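 * The two loops differ only in that xpt_done_td() maintains the
 * cam_doneq_sleep flag used by xpt_done() for wakeups and runs each batch
 * under THREAD_NO_SLEEPING(); a shared helper would have to keep both.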
5379 */
5380 static void
5381 xpt_async_td(void *arg)
5382 {
5383 struct cam_doneq *queue = arg;
5384 struct ccb_hdr *ccb_h;
5385 STAILQ_HEAD(, ccb_hdr) doneq;
5386
5387 STAILQ_INIT(&doneq);
5388 mtx_lock(&queue->cam_doneq_mtx);
5389 while (1) {
5390 while (STAILQ_EMPTY(&queue->cam_doneq))
5391 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5392 PRIBIO, "-", 0);
5393 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5394 mtx_unlock(&queue->cam_doneq_mtx);
5395
5396 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5397 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5398 xpt_done_process(ccb_h);
5399 }
5400
5401 mtx_lock(&queue->cam_doneq_mtx);
5402 }
5403 }
5404
5405 void
5406 xpt_done_td(void *arg)
5407 {
5408 struct cam_doneq *queue = arg;
5409 struct ccb_hdr *ccb_h;
5410 STAILQ_HEAD(, ccb_hdr) doneq;
5411
5412 STAILQ_INIT(&doneq);
5413 mtx_lock(&queue->cam_doneq_mtx);
5414 while (1) {
5415 while (STAILQ_EMPTY(&queue->cam_doneq)) {
5416 queue->cam_doneq_sleep = 1;
5417 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
5418 PRIBIO, "-", 0);
5419 queue->cam_doneq_sleep = 0;
5420 }
5421 STAILQ_CONCAT(&doneq, &queue->cam_doneq);
5422 mtx_unlock(&queue->cam_doneq_mtx);
5423
5424 THREAD_NO_SLEEPING();
5425 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) {
5426 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe);
5427 xpt_done_process(ccb_h);
5428 }
5429 THREAD_SLEEPING_OK();
5430
5431 mtx_lock(&queue->cam_doneq_mtx);
5432 }
5433 }
5434
5435 static void
5436 camisr_runqueue(void)
5437 {
5438 struct ccb_hdr *ccb_h;
5439 struct cam_doneq *queue;
5440 int i;
5441
5442 /* Process global queues. */
5443 for (i = 0; i < cam_num_doneqs; i++) {
5444 queue = &cam_doneqs[i];
5445 mtx_lock(&queue->cam_doneq_mtx);
5446 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
5447 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
5448 mtx_unlock(&queue->cam_doneq_mtx);
5449 xpt_done_process(ccb_h);
5450 mtx_lock(&queue->cam_doneq_mtx);
5451 }
5452 mtx_unlock(&queue->cam_doneq_mtx);
5453 }
5454 }
5455
5456 /**
5457 * @brief Return the device_t associated with the path
5458 *
5459 * When a SIM is created, it registers a bus with a NEWBUS device_t. This is
5460 * stored in the internal cam_eb bus structure. There is no guarantee any given
5461 * path will have a @c device_t associated with it (it's legal to call @c
5462 * xpt_bus_register with a @c NULL @c device_t).
5463 *
5464 * @param path Path to return the device_t for.
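 *
 * @return The bus's parent device_t, or @c NULL if the bus was registered
 * without one.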
5465 */ 5466 device_t 5467 xpt_path_sim_device(const struct cam_path *path) 5468 { 5469 return (path->bus->parent_dev); 5470 } 5471 5472 struct kv 5473 { 5474 uint32_t v; 5475 const char *name; 5476 }; 5477 5478 static struct kv map[] = { 5479 { XPT_NOOP, "XPT_NOOP" }, 5480 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5481 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5482 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5483 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5484 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5485 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5486 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5487 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5488 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5489 { XPT_DEBUG, "XPT_DEBUG" }, 5490 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5491 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5492 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5493 { XPT_ASYNC, "XPT_ASYNC" }, 5494 { XPT_ABORT, "XPT_ABORT" }, 5495 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5496 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5497 { XPT_TERM_IO, "XPT_TERM_IO" }, 5498 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5499 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5500 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5501 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5502 { XPT_ATA_IO, "XPT_ATA_IO" }, 5503 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5504 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5505 { XPT_NVME_IO, "XPT_NVME_IO" }, 5506 { XPT_MMC_IO, "XPT_MMC_IO" }, 5507 { XPT_SMP_IO, "XPT_SMP_IO" }, 5508 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5509 { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, 5510 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5511 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5512 { XPT_EN_LUN, "XPT_EN_LUN" }, 5513 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5514 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5515 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5516 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5517 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5518 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5519 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5520 { 0, 0 } 5521 }; 5522 5523 const char * 5524 xpt_action_name(uint32_t action) 5525 { 5526 static char buffer[32]; /* Only for unknown messages -- racy */ 5527 struct kv *walker = map; 5528 5529 while (walker->name != NULL) { 5530 if (walker->v == action) 5531 return (walker->name); 5532 walker++; 5533 } 5534 5535 snprintf(buffer, sizeof(buffer), "%#x", action); 5536 return (buffer); 5537 } 5538 5539 void 5540 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...) 5541 { 5542 struct sbuf sbuf; 5543 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5544 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5545 va_list ap; 5546 5547 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5548 xpt_path_sbuf(path, sb); 5549 va_start(ap, fmt); 5550 sbuf_vprintf(sb, fmt, ap); 5551 va_end(ap); 5552 sbuf_finish(sb); 5553 sbuf_delete(sb); 5554 if (cam_debug_delay != 0) 5555 DELAY(cam_debug_delay); 5556 } 5557 5558 void 5559 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...) 5560 { 5561 struct sbuf sbuf; 5562 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5563 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5564 va_list ap; 5565 5566 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5567 xpt_device_sbuf(dev, sb); 5568 va_start(ap, fmt); 5569 sbuf_vprintf(sb, fmt, ap); 5570 va_end(ap); 5571 sbuf_finish(sb); 5572 sbuf_delete(sb); 5573 if (cam_debug_delay != 0) 5574 DELAY(cam_debug_delay); 5575 } 5576 5577 void 5578 xpt_cam_debug(const char *fmt, ...) 
5579 { 5580 struct sbuf sbuf; 5581 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5582 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5583 va_list ap; 5584 5585 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5586 sbuf_cat(sb, "cam_debug: "); 5587 va_start(ap, fmt); 5588 sbuf_vprintf(sb, fmt, ap); 5589 va_end(ap); 5590 sbuf_finish(sb); 5591 sbuf_delete(sb); 5592 if (cam_debug_delay != 0) 5593 DELAY(cam_debug_delay); 5594 } 5595