/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_printf.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_iosched.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/stdarg.h>	/* for xpt_print below */

/* Wild guess based on not wanting to grow the stack too much */
#define XPT_PRINT_MAXLEN	512
#ifdef PRINTF_BUFR_SIZE
#define XPT_PRINT_LEN	PRINTF_BUFR_SIZE
#else
#define XPT_PRINT_LEN	128
#endif
_Static_assert(XPT_PRINT_LEN <= XPT_PRINT_MAXLEN, "XPT_PRINT_LEN is too large");

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Datastructures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/*
	 * Registered buses
	 *
	 * N.B., "busses" is an archaic spelling of "buses".  In new code
	 * "buses" is preferred.
	 */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	int			boot_delay;
	struct callout		boot_callout;
	struct task		boot_task;
	struct root_hold_token	xpt_rootmount;

	struct mtx		xpt_topo_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
    &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static u_int __read_mostly cam_num_doneqs;
static struct proc *cam_proc;
static struct cam_doneq cam_async;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
    &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};

/* Storage for debugging datastructures */
struct cam_path *cam_dpath;
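/*
 * A descriptive note on the debug globals (not in the original source):
 * cam_dpath, when set, restricts CAM_DEBUG output to a single path, while
 * cam_dflags and cam_debug_delay below select which debug classes print
 * and how quickly.
 */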
uint32_t __read_mostly cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
    &cam_dflags, 0, "Enabled debug flags");
uint32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
    &cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void	xpt_async_bcast(struct async_list *async_head,
		    uint32_t async_code,
		    struct cam_path *path,
		    void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static void	 xpt_hold_boot_locked(void);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
		    uint32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static void	 xpt_async_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
			    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
			    u_int num_patterns,
			    struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
			    u_int num_patterns,
			    struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
			    xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
			    struct cam_et *start_target,
			    xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
			    struct cam_ed *start_device,
			    xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
			    struct cam_periph *start_periph,
			    xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
			    xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
			    struct cam_periph *start_periph,
			    xpt_periphfunc_t *tr_func,
			    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(uint32_t async_code,
			    struct cam_eb *bus,
			    struct cam_et *target,
			    struct cam_ed *device,
			    void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
			    struct cam_et *target,
			    lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
			    void *arg);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init(void)
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (inccb->ccb_h.func_code == XPT_SCSI_IO)
			inccb->csio.bio = NULL;
#endif

		if (inccb->ccb_h.flags & CAM_UNLOCKED)
			return (EINVAL);

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */
			memset(&ccb, 0, sizeof(ccb));

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP){
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;
		}
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, maxphys);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			error = cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
	 * with the peripheral driver name and unit name filled in.  The other
	 * fields don't really matter as input.  The passthrough driver name
	 * ("pass"), and unit number are passed back in the ccb.  The current
	 * device generation number, and the index into the device peripheral
	 * driver list, and the status are also passed back.  Note that
	 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
	 * we never return a status of CAM_GDEVLIST_LIST_CHANGED.  It is
	 * (or rather should be) impossible for the device peripheral driver
	 * list to change since we look at the whole thing in one pass, and
	 * we do it with lock protection.
	 *
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char   *name;
		u_int unit;
		bool base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = false;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.bio = NULL;
#endif

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
		 */
		for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL;
		     periph = TAILQ_NEXT(periph, unit_links)) {
			if (periph->unit_number == unit)
				break;
		}
		/*
		 * If we found the peripheral driver that the user passed
		 * in, go through all of the peripheral drivers for that
		 * particular device and look for a passthrough driver.
		 */
		if (periph != NULL) {
			struct cam_ed *device;
			int i;

			base_periph_found = true;
			device = periph->path->device;
			for (i = 0, periph = SLIST_FIRST(&device->periphs);
			     periph != NULL;
			     periph = SLIST_NEXT(periph, periph_links), i++) {
				/*
				 * Check to see whether we have a
				 * passthrough device or not.
				 */
				if (strcmp(periph->periph_name, "pass") == 0) {
					/*
					 * Fill in the getdevlist fields.
					 */
					strlcpy(ccb->cgdl.periph_name,
					       periph->periph_name,
					       sizeof(ccb->cgdl.periph_name));
					ccb->cgdl.unit_number =
						periph->unit_number;
					if (SLIST_NEXT(periph, periph_links))
						ccb->cgdl.status =
							CAM_GDEVLIST_MORE_DEVS;
					else
						ccb->cgdl.status =
						       CAM_GDEVLIST_LAST_DEVICE;
					ccb->cgdl.generation =
						device->generation;
					ccb->cgdl.index = i;
					/*
					 * Fill in some CCB header fields
					 * that the user may want.
					 */
					ccb->ccb_h.path_id =
						periph->path->bus->path_id;
					ccb->ccb_h.target_id =
						periph->path->target->target_id;
					ccb->ccb_h.target_lun =
						periph->path->device->lun_id;
					ccb->ccb_h.status = CAM_REQ_CMP;
					break;
				}
			}
		}

		/*
		 * If the periph is null here, one of two things has
		 * happened.  The first possibility is that we couldn't
		 * find the unit number of the particular peripheral driver
		 * that the user is asking about.  e.g. the user asks for
		 * the passthrough driver for "da11".  We find the list of
		 * "da" peripherals all right, but there is no unit 11.
		 * The other possibility is that we went through the list
		 * of peripheral drivers attached to the device structure,
		 * but didn't find one with the name "pass".  Either way,
		 * we return ENOENT, since we couldn't find something.
		 */
		if (periph == NULL) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			/*
			 * It is unfortunate that this is even necessary,
			 * but there are many, many clueless users out there.
			 * If this is true, the user is looking for the
			 * passthrough driver, but doesn't have one in his
			 * kernel.
			 */
			if (base_periph_found) {
				printf("xptioctl: pass driver is not in the "
				       "kernel\n");
				printf("xptioctl: put \"device pass\" in "
				       "your kernel config file\n");
			}
		}
		xpt_unlock_buses();
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return(error);
}

static int
cam_module_event_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		if ((error = xpt_init(NULL)) != 0)
			return (error);
		break;
	case MOD_UNLOAD:
		return EBUSY;
	default:
		return EOPNOTSUPP;
	}

	return 0;
}

static struct xpt_proto *
xpt_proto_find(cam_proto proto)
{
	struct xpt_proto **pp;

	SET_FOREACH(pp, cam_xpt_proto_set) {
		if ((*pp)->proto == proto)
			return *pp;
	}

	return NULL;
}

static void
xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb)
{

	if (done_ccb->ccb_h.ppriv_ptr1 == NULL) {
		xpt_free_path(done_ccb->ccb_h.path);
		xpt_free_ccb(done_ccb);
	} else {
		done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1;
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
	}
	xpt_release_boot();
}

/* thread to handle bus rescans */
static void
xpt_scanner_thread(void *dummy)
{
	union ccb	*ccb;
	struct mtx	*mtx;
	struct cam_ed	*device;

	xpt_lock_buses();
	for (;;) {
		if (TAILQ_EMPTY(&xsoftc.ccb_scanq))
			msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO,
			       "-", 0);
		if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
			TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
			xpt_unlock_buses();

			/*
			 * We need to lock the device's mutex which we use as
			 * the path mutex. We can't do it directly because the
			 * cam_path in the ccb may wind up going away because
			 * the path lock may be dropped and the path retired in
			 * the completion callback. We do this directly to keep
			 * the reference counts in cam_path sane. We also have
			 * to copy the device pointer because ccb_h.path may
			 * be freed in the callback.
			 */
			mtx = xpt_path_mtx(ccb->ccb_h.path);
			device = ccb->ccb_h.path->device;
			xpt_acquire_device(device);
			mtx_lock(mtx);
			xpt_action(ccb);
			mtx_unlock(mtx);
			xpt_release_device(device);

			xpt_lock_buses();
		}
	}
}

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
		xpt_action_name(ccb->ccb_h.func_code)));

	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xpt_hold_boot_locked();
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif

	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe buses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/NULL,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	if ((error = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		printf("xpt_init: xpt_bus_register failed with errno %d,"
		       " failing attach\n", error);
		return (EINVAL);
	}

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}

	mtx_init(&cam_async.cam_doneq_mtx, "CAM async", NULL, MTX_DEF);
	STAILQ_INIT(&cam_async.cam_doneq);
	if (kproc_kthread_add(xpt_async_td, &cam_async,
		&cam_proc, NULL, 0, 0, "cam", "async") != 0) {
		printf("xpt_init: Cannot init async thread "
		       "- failing attach\n");
		return (ENOMEM);
	}

	/*
	 * Register a callback for when interrupts are enabled.
	 */
	config_intrhook_oneshot(xpt_config, NULL);

	return (0);
}

static cam_status
xptregister(struct cam_periph *periph, void *arg)
{
	struct cam_sim *xpt_sim;

	if (periph == NULL) {
		printf("xptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	xpt_sim = (struct cam_sim *)arg;
	xpt_sim->softc = periph;
	xpt_periph = periph;
	periph->softc = NULL;

	return(CAM_REQ_CMP);
}

int32_t
xpt_add_periph(struct cam_periph *periph)
{
	struct cam_ed *device;
	int32_t	 status;

	TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph);
	device = periph->path->device;
	status = CAM_REQ_CMP;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_INSERT_HEAD(&device->periphs, periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}

	return (status);
}

void
xpt_remove_periph(struct cam_periph *periph)
{
	struct cam_ed *device;

	device = periph->path->device;
	if (device != NULL) {
		mtx_lock(&device->target->bus->eb_mtx);
		device->generation++;
		SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links);
		mtx_unlock(&device->target->bus->eb_mtx);
		atomic_add_32(&xsoftc.xpt_generation, 1);
	}
}

void
xpt_announce_periph(struct cam_periph *periph, char *announce_string)
{
	char buf[128];
	struct sbuf sb;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
	xpt_announce_periph_sbuf(periph, &sb, announce_string);
	(void)sbuf_finish(&sb);
}

void
xpt_announce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb,
    char *announce_string)
{
	struct	cam_path *path = periph->path;
	struct	xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	periph->flags |= CAM_PERIPH_ANNOUNCED;

	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	    periph->periph_name, periph->unit_number,
	    path->bus->sim->sim_name,
	    path->bus->sim->unit_number,
	    path->bus->sim->bus_id,
	    path->bus->path_id,
	    path->target->target_id,
	    (uintmax_t)path->device->lun_id);
	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->announce_sbuf(path->device, sb);
	else
		sbuf_printf(sb, "Unknown protocol device %d\n",
		    path->device->protocol);
	if (path->device->serial_num_len > 0) {
		/* Don't wrap the screen  - print only the first 60 chars */
		sbuf_printf(sb, "%s%d: Serial Number %.60s\n",
		    periph->periph_name, periph->unit_number,
		    path->device->serial_num);
	}
	/* Announce transport details. */
	path->bus->xport->ops->announce_sbuf(periph, sb);
	/* Announce command queueing. */
	if (path->device->inq_flags & SID_CmdQue
	 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
		sbuf_printf(sb, "%s%d: Command Queueing enabled\n",
		    periph->periph_name, periph->unit_number);
	}
	/* Announce caller's details if they've passed in. */
	if (announce_string != NULL)
		sbuf_printf(sb, "%s%d: %s\n", periph->periph_name,
		    periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_announce_quirks_sbuf(struct cam_periph *periph, struct sbuf *sb,
    int quirks, char *bit_string)
{
	if (quirks != 0) {
		sbuf_printf(sb, "%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	char buf[128];
	struct sbuf sb;

	(void)sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
	xpt_denounce_periph_sbuf(periph, &sb);
	(void)sbuf_finish(&sb);
}

void
xpt_denounce_periph_sbuf(struct cam_periph *periph, struct sbuf *sb)
{
	struct cam_path *path = periph->path;
	struct xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);

	sbuf_printf(sb, "%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	    periph->periph_name, periph->unit_number,
	    path->bus->sim->sim_name,
	    path->bus->sim->unit_number,
	    path->bus->sim->bus_id,
	    path->bus->path_id,
	    path->target->target_id,
	    (uintmax_t)path->device->lun_id);
	sbuf_printf(sb, "%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->denounce_sbuf(path->device, sb);
	else
		sbuf_printf(sb, "Unknown protocol device %d",
		    path->device->protocol);
	if (path->device->serial_num_len > 0)
		sbuf_printf(sb, " s/n %.60s", path->device->serial_num);
	sbuf_cat(sb, " detached\n");
}

int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l, o;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_device_id *did;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
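	/*
	 * Descriptive note (not in the original source): ask the transport
	 * layer for the attribute via an XPT_DEV_ADVINFO request; the buffer
	 * type chosen below depends on which GEOM attribute was requested.
	 */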
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.bufsiz = len;
	cdai.buf = buf;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
	    strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
		cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT);
		if (cdai.buf == NULL) {
			ret = ENOMEM;
			goto out;
		}
	} else
		goto out;

	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	switch(cdai.buftype) {
	case CDAI_TYPE_SCSI_DEVID:
		did = (struct scsi_vpd_device_id *)cdai.buf;
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_eui64);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_uuid);
			if (idd == NULL)
				idd = scsi_get_devid(did, cdai.provsiz,
				    scsi_devid_is_lun_md5);
		} else
			idd = NULL;

		if (idd == NULL)
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid(did, cdai.provsiz,
			    scsi_devid_is_lun_name);
		if (idd == NULL)
			break;

		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
		    SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
			break;
		}
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) ==
		    SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
			break;
		}
		if ((idd->id_type & SVPD_ID_TYPE_MASK) ==
		    SVPD_ID_TYPE_UUID && idd->identifier[0] == 0x10) {
			if ((idd->length - 2) * 2 + 4 >= len) {
				ret = EFAULT;
				break;
			}
			for (l = 2, o = 0; l < idd->length; l++) {
				if (l == 6 || l == 8 || l == 10 || l == 12)
					o += sprintf(buf + o, "-");
				o += sprintf(buf + o, "%02x",
				    idd->identifier[l]);
			}
			break;
		}
		if (idd->length * 2 < len) {
			for (l = 0; l < idd->length; l++)
				sprintf(buf + l * 2, "%02x",
				    idd->identifier[l]);
		} else
			ret = EFAULT;
		break;
	default:
		if (cdai.provsiz < len) {
			cdai.buf[cdai.provsiz] = 0;
			ret = 0;
		} else
			ret = EFAULT;
		break;
	}

out:
	if ((char *)cdai.buf != buf)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;
		struct device_match_pattern *dp = &patterns[i].pattern.device_pattern;
		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type == DEV_MATCH_DEVICE &&
		    (dp->flags & DEV_MATCH_PATH) != 0 &&
		    dp->path_id != bus->path_id)
			continue;
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_PATH) != 0 &&
		    pp->path_id != bus->path_id)
			continue;
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a non-bus matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a non-bus
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen anything other than bus matching patterns.  So
	 * tell the caller to stop descending the tree -- the user doesn't
	 * want to match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

static dev_match_ret
xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_ed *device)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (device == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this device matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct device_match_pattern *cur_pattern;
		struct scsi_vpd_device_id *device_id_page;
		struct periph_match_pattern *pp = &patterns[i].pattern.periph_pattern;

		/*
		 * If the pattern in question isn't for a device node, we
		 * aren't interested.
		 */
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_TARGET) != 0 &&
		    pp->target_id != device->target->target_id)
			continue;
		if (patterns[i].type == DEV_MATCH_PERIPH &&
		    (pp->flags & PERIPH_MATCH_LUN) != 0 &&
		    pp->target_lun != device->lun_id)
			continue;
		if (patterns[i].type != DEV_MATCH_DEVICE) {
			if ((patterns[i].type == DEV_MATCH_PERIPH)
			 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.device_pattern;

		/* Error out if mutually exclusive options are specified. */
		if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
		 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID))
			return(DM_RET_ERROR);

		if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != device->target->bus->path_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
		 && (cur_pattern->target_id != device->target->target_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
		 && (cur_pattern->target_lun != device->lun_id))
			continue;

		if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
		 && (cam_quirkmatch((caddr_t)&device->inq_data,
				    (caddr_t)&cur_pattern->data.inq_pat,
				    1, sizeof(cur_pattern->data.inq_pat),
				    scsi_static_inquiry_match) == NULL))
			continue;

		device_id_page = (struct scsi_vpd_device_id *)device->device_id;
		if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0)
		 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN
		  || scsi_devid_match((uint8_t *)device_id_page->desc_list,
				      device->device_id_len
				    - SVPD_DEVICE_ID_HDR_LEN,
				      cur_pattern->data.devid_pat.id,
				      cur_pattern->data.devid_pat.id_len) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this device.  So tell the caller to copy
		 * the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * If the return action has been set to descend, then we
		 * know that we've already seen a peripheral matching
		 * expression, therefore we need to further descend the tree.
		 * This won't change by continuing around the loop, so we
		 * go ahead and return.  If we haven't seen a peripheral
		 * matching expression, we keep going around the loop until
		 * we exhaust the matching expressions.  We'll set the stop
		 * flag once we fall out of the loop.
		 */
		if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
			return(retval);
	}

	/*
	 * If the return action hasn't been set to descend yet, that means
	 * we haven't seen any peripheral matching patterns.  So tell the
	 * caller to stop descending the tree -- the user doesn't want to
	 * match against lower level tree elements.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
		retval |= DM_RET_STOP;

	return(retval);
}

/*
 * Match a single peripheral against any number of match patterns.
 */
static dev_match_ret
xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	       struct cam_periph *periph)
{
	dev_match_ret retval;
	u_int i;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (periph == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this peripheral matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_STOP | DM_RET_COPY);

	/*
	 * There aren't any nodes below a peripheral node, so there's no
	 * reason to descend the tree any further.
	 */
	retval = DM_RET_STOP;

	for (i = 0; i < num_patterns; i++) {
		struct periph_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a peripheral, we
		 * aren't interested.
		 */
		if (patterns[i].type != DEV_MATCH_PERIPH)
			continue;

		cur_pattern = &patterns[i].pattern.periph_pattern;

		if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != periph->path->bus->path_id))
			continue;

		/*
		 * For the target and lun id's, we have to make sure the
		 * target and lun pointers aren't NULL.  The xpt peripheral
		 * has a wildcard target and device.
		 */
		if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
		 && ((periph->path->target == NULL)
		 ||(cur_pattern->target_id != periph->path->target->target_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
		 && ((periph->path->device == NULL)
		 || (cur_pattern->target_lun != periph->path->device->lun_id)))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != periph->unit_number))
			continue;

		if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->periph_name, periph->periph_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this peripheral.  So tell the caller to
		 * copy the data out.
		 */
		retval |= DM_RET_COPY;

		/*
		 * The return action has already been set to stop, since
		 * peripherals don't have any nodes below them in the EDT.
		 */
		return(retval);
	}

	/*
	 * If we get to this point, the peripheral that was passed in
	 * doesn't match any of the patterns.
	 */
	return(retval);
}

static int
xptedtbusfunc(struct cam_eb *bus, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_et *target;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);

	/*
	 * If we got an error, bail out of the search.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this bus out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;

			cdm->pos.cookie.bus = bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_BUS;
		cdm->matches[j].result.bus_result.path_id = bus->path_id;
		cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
		cdm->matches[j].result.bus_result.unit_number =
			bus->sim->unit_number;
		strlcpy(cdm->matches[j].result.bus_result.dev_name,
			bus->sim->sim_name,
			sizeof(cdm->matches[j].result.bus_result.dev_name));
	}

	/*
	 * If the user is only interested in buses, there's no
	 * reason to descend to the next level in the tree.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a target generation recorded, check it to
	 * make sure the target list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target != NULL)) {
		if ((cdm->pos.generations[CAM_TARGET_GENERATION] !=
		    bus->generation)) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return (0);
		}
		target = (struct cam_et *)cdm->pos.cookie.target;
		target->refcount++;
	} else
		target = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xpttargettraverse(bus, target, xptedttargetfunc, arg));
}

static int
xptedttargetfunc(struct cam_et *target, void *arg)
{
	struct ccb_dev_match *cdm;
	struct cam_eb *bus;
	struct cam_ed *device;

	cdm = (struct ccb_dev_match *)arg;
	bus = target->bus;

	/*
	 * If there is a device list generation recorded, check it to
	 * make sure the device list hasn't changed.
	 */
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device != NULL)) {
		if (cdm->pos.generations[CAM_DEV_GENERATION] !=
		    target->generation) {
			mtx_unlock(&bus->eb_mtx);
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		device = (struct cam_ed *)cdm->pos.cookie.device;
		device->refcount++;
	} else
		device = NULL;
	mtx_unlock(&bus->eb_mtx);

	return (xptdevicetraverse(target, device, xptedtdevicefunc, arg));
}

static int
xptedtdevicefunc(struct cam_ed *device, void *arg)
{
	struct cam_eb *bus;
	struct cam_periph *periph;
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;
	bus = device->target->bus;

	/*
	 * If our position is for something deeper in the tree, that means
	 * that we've already seen this node.  So, we keep going down.
	 */
	if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL))
		retval = DM_RET_DESCEND;
	else
		retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
					device);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this device out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;

			cdm->pos.cookie.bus = device->target->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = device->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				device->target->bus->generation;
			cdm->pos.cookie.device = device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				device->target->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}
		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_DEVICE;
		cdm->matches[j].result.device_result.path_id =
			device->target->bus->path_id;
		cdm->matches[j].result.device_result.target_id =
			device->target->target_id;
		cdm->matches[j].result.device_result.target_lun =
			device->lun_id;
		cdm->matches[j].result.device_result.protocol =
			device->protocol;
		bcopy(&device->inq_data,
		      &cdm->matches[j].result.device_result.inq_data,
		      sizeof(struct scsi_inquiry_data));
		bcopy(&device->ident_data,
		      &cdm->matches[j].result.device_result.ident_data,
		      sizeof(struct ata_params));

		/* Let the user know whether this device is unconfigured */
		if (device->flags & CAM_DEV_UNCONFIGURED)
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_UNCONFIGURED;
		else
			cdm->matches[j].result.device_result.flags =
				DEV_RESULT_NOFLAG;
	}

	/*
	 * If the user isn't interested in peripherals, don't descend
	 * the tree any further.
	 */
	if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
		return(1);

	/*
	 * If there is a peripheral list generation recorded, make sure
	 * it hasn't changed.
	 */
	xpt_lock_buses();
	mtx_lock(&bus->eb_mtx);
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus == bus)
	 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
	 && (cdm->pos.cookie.target == device->target)
	 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
	 && (cdm->pos.cookie.device == device)
	 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
	 && (cdm->pos.cookie.periph != NULL)) {
		if (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
		    device->generation) {
			mtx_unlock(&bus->eb_mtx);
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		periph = (struct cam_periph *)cdm->pos.cookie.periph;
		periph->refcount++;
	} else
		periph = NULL;
	mtx_unlock(&bus->eb_mtx);
	xpt_unlock_buses();

	return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg));
}

static int
xptedtperiphfunc(struct cam_periph *periph, void *arg)
{
	struct ccb_dev_match *cdm;
	dev_match_ret retval;

	cdm = (struct ccb_dev_match *)arg;

	retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);

	if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
		cdm->status = CAM_DEV_MATCH_ERROR;
		return(0);
	}

	/*
	 * If the copy flag is set, copy this peripheral out.
	 */
	if (retval & DM_RET_COPY) {
		int spaceleft, j;
		size_t l;

		spaceleft = cdm->match_buf_len - (cdm->num_matches *
			sizeof(struct dev_match_result));

		/*
		 * If we don't have enough space to put in another
		 * match result, save our position and tell the
		 * user there are more devices to check.
		 */
		if (spaceleft < sizeof(struct dev_match_result)) {
			bzero(&cdm->pos, sizeof(cdm->pos));
			cdm->pos.position_type =
				CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
				CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
				CAM_DEV_POS_PERIPH;

			cdm->pos.cookie.bus = periph->path->bus;
			cdm->pos.generations[CAM_BUS_GENERATION]=
				xsoftc.bus_generation;
			cdm->pos.cookie.target = periph->path->target;
			cdm->pos.generations[CAM_TARGET_GENERATION] =
				periph->path->bus->generation;
			cdm->pos.cookie.device = periph->path->device;
			cdm->pos.generations[CAM_DEV_GENERATION] =
				periph->path->target->generation;
			cdm->pos.cookie.periph = periph;
			cdm->pos.generations[CAM_PERIPH_GENERATION] =
				periph->path->device->generation;
			cdm->status = CAM_DEV_MATCH_MORE;
			return(0);
		}

		j = cdm->num_matches;
		cdm->num_matches++;
		cdm->matches[j].type = DEV_MATCH_PERIPH;
		cdm->matches[j].result.periph_result.path_id =
			periph->path->bus->path_id;
		cdm->matches[j].result.periph_result.target_id =
			periph->path->target->target_id;
		cdm->matches[j].result.periph_result.target_lun =
			periph->path->device->lun_id;
		cdm->matches[j].result.periph_result.unit_number =
			periph->unit_number;
		l = sizeof(cdm->matches[j].result.periph_result.periph_name);
		strlcpy(cdm->matches[j].result.periph_result.periph_name,
			periph->periph_name, l);
	}

	return(1);
}

static int
xptedtmatch(struct ccb_dev_match *cdm)
{
	struct cam_eb *bus;
	int ret;

	cdm->num_matches = 0;

	/*
	 * Check the bus list generation.  If it has changed, the user
	 * needs to reset everything and start over.
	 */
	xpt_lock_buses();
	if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
	 && (cdm->pos.cookie.bus != NULL)) {
		if (cdm->pos.generations[CAM_BUS_GENERATION] !=
		    xsoftc.bus_generation) {
			xpt_unlock_buses();
			cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
			return(0);
		}
		bus = (struct cam_eb *)cdm->pos.cookie.bus;
		bus->refcount++;
	} else
		bus = NULL;
	xpt_unlock_buses();

	ret = xptbustraverse(bus, xptedtbusfunc, cdm);

	/*
	 * If we get back 0, that means that we had to stop before fully
	 * traversing the EDT.  It also means that one of the subroutines
	 * has set the status field to the proper value.  If we get back 1,
	 * we've fully traversed the EDT and copied out any matching entries.
1947 */ 1948 if (ret == 1) 1949 cdm->status = CAM_DEV_MATCH_LAST; 1950 1951 return(ret); 1952 } 1953 1954 static int 1955 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1956 { 1957 struct cam_periph *periph; 1958 struct ccb_dev_match *cdm; 1959 1960 cdm = (struct ccb_dev_match *)arg; 1961 1962 xpt_lock_buses(); 1963 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1964 && (cdm->pos.cookie.pdrv == pdrv) 1965 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1966 && (cdm->pos.cookie.periph != NULL)) { 1967 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1968 (*pdrv)->generation) { 1969 xpt_unlock_buses(); 1970 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1971 return(0); 1972 } 1973 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1974 periph->refcount++; 1975 } else 1976 periph = NULL; 1977 xpt_unlock_buses(); 1978 1979 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1980 } 1981 1982 static int 1983 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1984 { 1985 struct ccb_dev_match *cdm; 1986 dev_match_ret retval; 1987 1988 cdm = (struct ccb_dev_match *)arg; 1989 1990 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1991 1992 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1993 cdm->status = CAM_DEV_MATCH_ERROR; 1994 return(0); 1995 } 1996 1997 /* 1998 * If the copy flag is set, copy this peripheral out. 1999 */ 2000 if (retval & DM_RET_COPY) { 2001 int spaceleft, j; 2002 size_t l; 2003 2004 spaceleft = cdm->match_buf_len - (cdm->num_matches * 2005 sizeof(struct dev_match_result)); 2006 2007 /* 2008 * If we don't have enough space to put in another 2009 * match result, save our position and tell the 2010 * user there are more devices to check. 2011 */ 2012 if (spaceleft < sizeof(struct dev_match_result)) { 2013 struct periph_driver **pdrv; 2014 2015 pdrv = NULL; 2016 bzero(&cdm->pos, sizeof(cdm->pos)); 2017 cdm->pos.position_type = 2018 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2019 CAM_DEV_POS_PERIPH; 2020 2021 /* 2022 * This may look a bit non-sensical, but it is 2023 * actually quite logical. There are very few 2024 * peripheral drivers, and bloating every peripheral 2025 * structure with a pointer back to its parent 2026 * peripheral driver linker set entry would cost 2027 * more in the long run than doing this quick lookup. 2028 */ 2029 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2030 if (strcmp((*pdrv)->driver_name, 2031 periph->periph_name) == 0) 2032 break; 2033 } 2034 2035 if (*pdrv == NULL) { 2036 cdm->status = CAM_DEV_MATCH_ERROR; 2037 return(0); 2038 } 2039 2040 cdm->pos.cookie.pdrv = pdrv; 2041 /* 2042 * The periph generation slot does double duty, as 2043 * does the periph pointer slot. They are used for 2044 * both edt and pdrv lookups and positioning. 2045 */ 2046 cdm->pos.cookie.periph = periph; 2047 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2048 (*pdrv)->generation; 2049 cdm->status = CAM_DEV_MATCH_MORE; 2050 return(0); 2051 } 2052 2053 j = cdm->num_matches; 2054 cdm->num_matches++; 2055 cdm->matches[j].type = DEV_MATCH_PERIPH; 2056 cdm->matches[j].result.periph_result.path_id = 2057 periph->path->bus->path_id; 2058 2059 /* 2060 * The transport layer peripheral doesn't have a target or 2061 * lun. 
2062 */ 2063 if (periph->path->target) 2064 cdm->matches[j].result.periph_result.target_id = 2065 periph->path->target->target_id; 2066 else 2067 cdm->matches[j].result.periph_result.target_id = 2068 CAM_TARGET_WILDCARD; 2069 2070 if (periph->path->device) 2071 cdm->matches[j].result.periph_result.target_lun = 2072 periph->path->device->lun_id; 2073 else 2074 cdm->matches[j].result.periph_result.target_lun = 2075 CAM_LUN_WILDCARD; 2076 2077 cdm->matches[j].result.periph_result.unit_number = 2078 periph->unit_number; 2079 l = sizeof(cdm->matches[j].result.periph_result.periph_name); 2080 strlcpy(cdm->matches[j].result.periph_result.periph_name, 2081 periph->periph_name, l); 2082 } 2083 2084 return(1); 2085 } 2086 2087 static int 2088 xptperiphlistmatch(struct ccb_dev_match *cdm) 2089 { 2090 int ret; 2091 2092 cdm->num_matches = 0; 2093 2094 /* 2095 * At this point in the edt traversal function, we check the bus 2096 * list generation to make sure that no buses have been added or 2097 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2098 * For the peripheral driver list traversal function, however, we 2099 * don't have to worry about new peripheral driver types coming or 2100 * going; they're in a linker set, and therefore can't change 2101 * without a recompile. 2102 */ 2103 2104 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2105 && (cdm->pos.cookie.pdrv != NULL)) 2106 ret = xptpdrvtraverse( 2107 (struct periph_driver **)cdm->pos.cookie.pdrv, 2108 xptplistpdrvfunc, cdm); 2109 else 2110 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2111 2112 /* 2113 * If we get back 0, that means that we had to stop before fully 2114 * traversing the peripheral driver tree. It also means that one of 2115 * the subroutines has set the status field to the proper value. If 2116 * we get back 1, we've fully traversed the EDT and copied out any 2117 * matching entries. 
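 *
 * The peripheral-driver lists are normally reached with a pattern that
 * matches on a driver name rather than on a bus/target/lun position.
 * A hypothetical pattern of that kind might be filled in as follows
 * (a sketch only; the field names come from cam_ccb.h):
 *
 *	struct dev_match_pattern p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.type = DEV_MATCH_PERIPH;
 *	p.pattern.periph_pattern.flags = PERIPH_MATCH_NAME;
 *	strlcpy(p.pattern.periph_pattern.periph_name, "da",
 *	    sizeof(p.pattern.periph_pattern.periph_name));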
2118 */ 2119 if (ret == 1) 2120 cdm->status = CAM_DEV_MATCH_LAST; 2121 2122 return(ret); 2123 } 2124 2125 static int 2126 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2127 { 2128 struct cam_eb *bus, *next_bus; 2129 int retval; 2130 2131 retval = 1; 2132 if (start_bus) 2133 bus = start_bus; 2134 else { 2135 xpt_lock_buses(); 2136 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2137 if (bus == NULL) { 2138 xpt_unlock_buses(); 2139 return (retval); 2140 } 2141 bus->refcount++; 2142 xpt_unlock_buses(); 2143 } 2144 for (; bus != NULL; bus = next_bus) { 2145 retval = tr_func(bus, arg); 2146 if (retval == 0) { 2147 xpt_release_bus(bus); 2148 break; 2149 } 2150 xpt_lock_buses(); 2151 next_bus = TAILQ_NEXT(bus, links); 2152 if (next_bus) 2153 next_bus->refcount++; 2154 xpt_unlock_buses(); 2155 xpt_release_bus(bus); 2156 } 2157 return(retval); 2158 } 2159 2160 static int 2161 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2162 xpt_targetfunc_t *tr_func, void *arg) 2163 { 2164 struct cam_et *target, *next_target; 2165 int retval; 2166 2167 retval = 1; 2168 if (start_target) 2169 target = start_target; 2170 else { 2171 mtx_lock(&bus->eb_mtx); 2172 target = TAILQ_FIRST(&bus->et_entries); 2173 if (target == NULL) { 2174 mtx_unlock(&bus->eb_mtx); 2175 return (retval); 2176 } 2177 target->refcount++; 2178 mtx_unlock(&bus->eb_mtx); 2179 } 2180 for (; target != NULL; target = next_target) { 2181 retval = tr_func(target, arg); 2182 if (retval == 0) { 2183 xpt_release_target(target); 2184 break; 2185 } 2186 mtx_lock(&bus->eb_mtx); 2187 next_target = TAILQ_NEXT(target, links); 2188 if (next_target) 2189 next_target->refcount++; 2190 mtx_unlock(&bus->eb_mtx); 2191 xpt_release_target(target); 2192 } 2193 return(retval); 2194 } 2195 2196 static int 2197 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2198 xpt_devicefunc_t *tr_func, void *arg) 2199 { 2200 struct cam_eb *bus; 2201 struct cam_ed *device, *next_device; 2202 int retval; 2203 2204 retval = 1; 2205 bus = target->bus; 2206 if (start_device) 2207 device = start_device; 2208 else { 2209 mtx_lock(&bus->eb_mtx); 2210 device = TAILQ_FIRST(&target->ed_entries); 2211 if (device == NULL) { 2212 mtx_unlock(&bus->eb_mtx); 2213 return (retval); 2214 } 2215 device->refcount++; 2216 mtx_unlock(&bus->eb_mtx); 2217 } 2218 for (; device != NULL; device = next_device) { 2219 mtx_lock(&device->device_mtx); 2220 retval = tr_func(device, arg); 2221 mtx_unlock(&device->device_mtx); 2222 if (retval == 0) { 2223 xpt_release_device(device); 2224 break; 2225 } 2226 mtx_lock(&bus->eb_mtx); 2227 next_device = TAILQ_NEXT(device, links); 2228 if (next_device) 2229 next_device->refcount++; 2230 mtx_unlock(&bus->eb_mtx); 2231 xpt_release_device(device); 2232 } 2233 return(retval); 2234 } 2235 2236 static int 2237 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2238 xpt_periphfunc_t *tr_func, void *arg) 2239 { 2240 struct cam_eb *bus; 2241 struct cam_periph *periph, *next_periph; 2242 int retval; 2243 2244 retval = 1; 2245 2246 bus = device->target->bus; 2247 if (start_periph) 2248 periph = start_periph; 2249 else { 2250 xpt_lock_buses(); 2251 mtx_lock(&bus->eb_mtx); 2252 periph = SLIST_FIRST(&device->periphs); 2253 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2254 periph = SLIST_NEXT(periph, periph_links); 2255 if (periph == NULL) { 2256 mtx_unlock(&bus->eb_mtx); 2257 xpt_unlock_buses(); 2258 return (retval); 2259 } 2260 periph->refcount++; 2261 mtx_unlock(&bus->eb_mtx); 2262 
xpt_unlock_buses(); 2263 } 2264 for (; periph != NULL; periph = next_periph) { 2265 retval = tr_func(periph, arg); 2266 if (retval == 0) { 2267 cam_periph_release_locked(periph); 2268 break; 2269 } 2270 xpt_lock_buses(); 2271 mtx_lock(&bus->eb_mtx); 2272 next_periph = SLIST_NEXT(periph, periph_links); 2273 while (next_periph != NULL && 2274 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2275 next_periph = SLIST_NEXT(next_periph, periph_links); 2276 if (next_periph) 2277 next_periph->refcount++; 2278 mtx_unlock(&bus->eb_mtx); 2279 xpt_unlock_buses(); 2280 cam_periph_release_locked(periph); 2281 } 2282 return(retval); 2283 } 2284 2285 static int 2286 xptpdrvtraverse(struct periph_driver **start_pdrv, 2287 xpt_pdrvfunc_t *tr_func, void *arg) 2288 { 2289 struct periph_driver **pdrv; 2290 int retval; 2291 2292 retval = 1; 2293 2294 /* 2295 * We don't traverse the peripheral driver list like we do the 2296 * other lists, because it is a linker set, and therefore cannot be 2297 * changed during runtime. If the peripheral driver list is ever 2298 * re-done to be something other than a linker set (i.e. it can 2299 * change while the system is running), the list traversal should 2300 * be modified to work like the other traversal functions. 2301 */ 2302 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2303 *pdrv != NULL; pdrv++) { 2304 retval = tr_func(pdrv, arg); 2305 2306 if (retval == 0) 2307 return(retval); 2308 } 2309 2310 return(retval); 2311 } 2312 2313 static int 2314 xptpdperiphtraverse(struct periph_driver **pdrv, 2315 struct cam_periph *start_periph, 2316 xpt_periphfunc_t *tr_func, void *arg) 2317 { 2318 struct cam_periph *periph, *next_periph; 2319 int retval; 2320 2321 retval = 1; 2322 2323 if (start_periph) 2324 periph = start_periph; 2325 else { 2326 xpt_lock_buses(); 2327 periph = TAILQ_FIRST(&(*pdrv)->units); 2328 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2329 periph = TAILQ_NEXT(periph, unit_links); 2330 if (periph == NULL) { 2331 xpt_unlock_buses(); 2332 return (retval); 2333 } 2334 periph->refcount++; 2335 xpt_unlock_buses(); 2336 } 2337 for (; periph != NULL; periph = next_periph) { 2338 cam_periph_lock(periph); 2339 retval = tr_func(periph, arg); 2340 cam_periph_unlock(periph); 2341 if (retval == 0) { 2342 cam_periph_release(periph); 2343 break; 2344 } 2345 xpt_lock_buses(); 2346 next_periph = TAILQ_NEXT(periph, unit_links); 2347 while (next_periph != NULL && 2348 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2349 next_periph = TAILQ_NEXT(next_periph, unit_links); 2350 if (next_periph) 2351 next_periph->refcount++; 2352 xpt_unlock_buses(); 2353 cam_periph_release(periph); 2354 } 2355 return(retval); 2356 } 2357 2358 static int 2359 xptdefbusfunc(struct cam_eb *bus, void *arg) 2360 { 2361 struct xpt_traverse_config *tr_config; 2362 2363 tr_config = (struct xpt_traverse_config *)arg; 2364 2365 if (tr_config->depth == XPT_DEPTH_BUS) { 2366 xpt_busfunc_t *tr_func; 2367 2368 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2369 2370 return(tr_func(bus, tr_config->tr_arg)); 2371 } else 2372 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2373 } 2374 2375 static int 2376 xptdeftargetfunc(struct cam_et *target, void *arg) 2377 { 2378 struct xpt_traverse_config *tr_config; 2379 2380 tr_config = (struct xpt_traverse_config *)arg; 2381 2382 if (tr_config->depth == XPT_DEPTH_TARGET) { 2383 xpt_targetfunc_t *tr_func; 2384 2385 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2386 2387 return(tr_func(target, tr_config->tr_arg)); 2388 } else 2389 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2390 } 2391 2392 static int 2393 xptdefdevicefunc(struct cam_ed *device, void *arg) 2394 { 2395 struct xpt_traverse_config *tr_config; 2396 2397 tr_config = (struct xpt_traverse_config *)arg; 2398 2399 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2400 xpt_devicefunc_t *tr_func; 2401 2402 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2403 2404 return(tr_func(device, tr_config->tr_arg)); 2405 } else 2406 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2407 } 2408 2409 static int 2410 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2411 { 2412 struct xpt_traverse_config *tr_config; 2413 xpt_periphfunc_t *tr_func; 2414 2415 tr_config = (struct xpt_traverse_config *)arg; 2416 2417 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2418 2419 /* 2420 * Unlike the other default functions, we don't check for depth 2421 * here. The peripheral driver level is the last level in the EDT, 2422 * so if we're here, we should execute the function in question. 2423 */ 2424 return(tr_func(periph, tr_config->tr_arg)); 2425 } 2426 2427 /* 2428 * Execute the given function for every bus in the EDT. 2429 */ 2430 static int 2431 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2432 { 2433 struct xpt_traverse_config tr_config; 2434 2435 tr_config.depth = XPT_DEPTH_BUS; 2436 tr_config.tr_func = tr_func; 2437 tr_config.tr_arg = arg; 2438 2439 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2440 } 2441 2442 /* 2443 * Execute the given function for every device in the EDT. 2444 */ 2445 static int 2446 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2447 { 2448 struct xpt_traverse_config tr_config; 2449 2450 tr_config.depth = XPT_DEPTH_DEVICE; 2451 tr_config.tr_func = tr_func; 2452 tr_config.tr_arg = arg; 2453 2454 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2455 } 2456 2457 static int 2458 xptsetasyncfunc(struct cam_ed *device, void *arg) 2459 { 2460 struct cam_path path; 2461 struct ccb_getdev cgd; 2462 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2463 2464 /* 2465 * Don't report unconfigured devices (Wildcard devs, 2466 * devices only for target mode, device instances 2467 * that have been invalidated but are waiting for 2468 * their last reference count to be released). 
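 *
 * This walk exists so that a client registering for AC_FOUND_DEVICE
 * after devices have already been discovered still hears about them:
 * the async registration path replays each configured device to the
 * new callback as a synthetic AC_FOUND_DEVICE, using the XPT_GDEV_TYPE
 * data gathered below.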
2469 */ 2470 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2471 return (1); 2472 2473 memset(&cgd, 0, sizeof(cgd)); 2474 xpt_compile_path(&path, 2475 NULL, 2476 device->target->bus->path_id, 2477 device->target->target_id, 2478 device->lun_id); 2479 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2480 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2481 xpt_action((union ccb *)&cgd); 2482 csa->callback(csa->callback_arg, 2483 AC_FOUND_DEVICE, 2484 &path, &cgd); 2485 xpt_release_path(&path); 2486 2487 return(1); 2488 } 2489 2490 static int 2491 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2492 { 2493 struct cam_path path; 2494 struct ccb_pathinq cpi; 2495 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2496 2497 xpt_compile_path(&path, /*periph*/NULL, 2498 bus->path_id, 2499 CAM_TARGET_WILDCARD, 2500 CAM_LUN_WILDCARD); 2501 xpt_path_lock(&path); 2502 xpt_path_inq(&cpi, &path); 2503 csa->callback(csa->callback_arg, 2504 AC_PATH_REGISTERED, 2505 &path, &cpi); 2506 xpt_path_unlock(&path); 2507 xpt_release_path(&path); 2508 2509 return(1); 2510 } 2511 2512 void 2513 xpt_action(union ccb *start_ccb) 2514 { 2515 2516 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2517 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2518 xpt_action_name(start_ccb->ccb_h.func_code))); 2519 2520 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2521 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2522 } 2523 2524 void 2525 xpt_action_default(union ccb *start_ccb) 2526 { 2527 struct cam_path *path; 2528 struct cam_sim *sim; 2529 struct mtx *mtx; 2530 2531 path = start_ccb->ccb_h.path; 2532 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2533 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2534 xpt_action_name(start_ccb->ccb_h.func_code))); 2535 2536 switch (start_ccb->ccb_h.func_code) { 2537 case XPT_SCSI_IO: 2538 { 2539 struct cam_ed *device; 2540 2541 /* 2542 * For the sake of compatibility with SCSI-1 2543 * devices that may not understand the identify 2544 * message, we include lun information in the 2545 * second byte of all commands. SCSI-1 specifies 2546 * that luns are a 3 bit value and reserves only 3 2547 * bits for lun information in the CDB. Later 2548 * revisions of the SCSI spec allow for more than 8 2549 * luns, but have deprecated lun information in the 2550 * CDB. So, if the lun won't fit, we must omit. 2551 * 2552 * Also be aware that during initial probing for devices, 2553 * the inquiry information is unknown but initialized to 0. 2554 * This means that this code will be exercised while probing 2555 * devices with an ANSI revision greater than 2. 
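 *
 * Concretely: for such a device at lun 3, byte 1 of the CDB has
 * 3 << 5 = 0x60 or'ed into its top three bits below; luns of 8 or
 * above simply cannot be encoded this way and are left out.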
2556 */ 2557 device = path->device; 2558 if (device->protocol_version <= SCSI_REV_2 2559 && start_ccb->ccb_h.target_lun < 8 2560 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2561 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2562 start_ccb->ccb_h.target_lun << 5; 2563 } 2564 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2565 } 2566 /* FALLTHROUGH */ 2567 case XPT_TARGET_IO: 2568 case XPT_CONT_TARGET_IO: 2569 start_ccb->csio.sense_resid = 0; 2570 start_ccb->csio.resid = 0; 2571 /* FALLTHROUGH */ 2572 case XPT_ATA_IO: 2573 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2574 start_ccb->ataio.resid = 0; 2575 /* FALLTHROUGH */ 2576 case XPT_NVME_IO: 2577 case XPT_NVME_ADMIN: 2578 case XPT_MMC_IO: 2579 case XPT_MMC_GET_TRAN_SETTINGS: 2580 case XPT_MMC_SET_TRAN_SETTINGS: 2581 case XPT_RESET_DEV: 2582 case XPT_ENG_EXEC: 2583 case XPT_SMP_IO: 2584 { 2585 struct cam_devq *devq; 2586 2587 devq = path->bus->sim->devq; 2588 mtx_lock(&devq->send_mtx); 2589 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2590 if (xpt_schedule_devq(devq, path->device) != 0) 2591 xpt_run_devq(devq); 2592 mtx_unlock(&devq->send_mtx); 2593 break; 2594 } 2595 case XPT_CALC_GEOMETRY: 2596 /* Filter out garbage */ 2597 if (start_ccb->ccg.block_size == 0 2598 || start_ccb->ccg.volume_size == 0) { 2599 start_ccb->ccg.cylinders = 0; 2600 start_ccb->ccg.heads = 0; 2601 start_ccb->ccg.secs_per_track = 0; 2602 start_ccb->ccb_h.status = CAM_REQ_CMP; 2603 break; 2604 } 2605 goto call_sim; 2606 case XPT_ABORT: 2607 { 2608 union ccb* abort_ccb; 2609 2610 abort_ccb = start_ccb->cab.abort_ccb; 2611 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2612 struct cam_ed *device; 2613 struct cam_devq *devq; 2614 2615 device = abort_ccb->ccb_h.path->device; 2616 devq = device->sim->devq; 2617 2618 mtx_lock(&devq->send_mtx); 2619 if (abort_ccb->ccb_h.pinfo.index > 0) { 2620 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2621 abort_ccb->ccb_h.status = 2622 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2623 xpt_freeze_devq_device(device, 1); 2624 mtx_unlock(&devq->send_mtx); 2625 xpt_done(abort_ccb); 2626 start_ccb->ccb_h.status = CAM_REQ_CMP; 2627 break; 2628 } 2629 mtx_unlock(&devq->send_mtx); 2630 2631 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2632 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2633 /* 2634 * We've caught this ccb en route to 2635 * the SIM. Flag it for abort and the 2636 * SIM will do so just before starting 2637 * real work on the CCB. 2638 */ 2639 abort_ccb->ccb_h.status = 2640 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2641 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2642 start_ccb->ccb_h.status = CAM_REQ_CMP; 2643 break; 2644 } 2645 } 2646 if (XPT_FC_IS_QUEUED(abort_ccb) 2647 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2648 /* 2649 * It's already completed but waiting 2650 * for our SWI to get to it. 2651 */ 2652 start_ccb->ccb_h.status = CAM_UA_ABORT; 2653 break; 2654 } 2655 /* 2656 * If we weren't able to take care of the abort request 2657 * in the XPT, pass the request down to the SIM for processing. 
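 *
 * For reference, the owner of the original CCB typically builds an
 * abort request roughly like this (an illustrative sketch, not code
 * taken from this function; "ccb_to_abort" is whatever CCB the caller
 * wants cancelled):
 *
 *	union ccb ccb;
 *
 *	xpt_setup_ccb(&ccb.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	ccb.ccb_h.func_code = XPT_ABORT;
 *	ccb.cab.abort_ccb = ccb_to_abort;
 *	xpt_action(&ccb);
 *
 * and then inspects ccb.ccb_h.status to learn whether the abort took
 * effect here or had to be handled (or refused) by the SIM.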
2658 */ 2659 } 2660 /* FALLTHROUGH */ 2661 case XPT_ACCEPT_TARGET_IO: 2662 case XPT_EN_LUN: 2663 case XPT_IMMED_NOTIFY: 2664 case XPT_NOTIFY_ACK: 2665 case XPT_RESET_BUS: 2666 case XPT_IMMEDIATE_NOTIFY: 2667 case XPT_NOTIFY_ACKNOWLEDGE: 2668 case XPT_GET_SIM_KNOB_OLD: 2669 case XPT_GET_SIM_KNOB: 2670 case XPT_SET_SIM_KNOB: 2671 case XPT_GET_TRAN_SETTINGS: 2672 case XPT_SET_TRAN_SETTINGS: 2673 case XPT_PATH_INQ: 2674 call_sim: 2675 sim = path->bus->sim; 2676 mtx = sim->mtx; 2677 if (mtx && !mtx_owned(mtx)) 2678 mtx_lock(mtx); 2679 else 2680 mtx = NULL; 2681 2682 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2683 ("Calling sim->sim_action(): func=%#x\n", start_ccb->ccb_h.func_code)); 2684 (*(sim->sim_action))(sim, start_ccb); 2685 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2686 ("sim->sim_action returned: status=%#x\n", start_ccb->ccb_h.status)); 2687 if (mtx) 2688 mtx_unlock(mtx); 2689 break; 2690 case XPT_PATH_STATS: 2691 start_ccb->cpis.last_reset = path->bus->last_reset; 2692 start_ccb->ccb_h.status = CAM_REQ_CMP; 2693 break; 2694 case XPT_GDEV_TYPE: 2695 { 2696 struct cam_ed *dev; 2697 2698 dev = path->device; 2699 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2700 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2701 } else { 2702 struct ccb_getdev *cgd; 2703 2704 cgd = &start_ccb->cgd; 2705 cgd->protocol = dev->protocol; 2706 cgd->inq_data = dev->inq_data; 2707 cgd->ident_data = dev->ident_data; 2708 cgd->inq_flags = dev->inq_flags; 2709 cgd->ccb_h.status = CAM_REQ_CMP; 2710 cgd->serial_num_len = dev->serial_num_len; 2711 if ((dev->serial_num_len > 0) 2712 && (dev->serial_num != NULL)) 2713 bcopy(dev->serial_num, cgd->serial_num, 2714 dev->serial_num_len); 2715 } 2716 break; 2717 } 2718 case XPT_GDEV_STATS: 2719 { 2720 struct ccb_getdevstats *cgds = &start_ccb->cgds; 2721 struct cam_ed *dev = path->device; 2722 struct cam_eb *bus = path->bus; 2723 struct cam_et *tar = path->target; 2724 struct cam_devq *devq = bus->sim->devq; 2725 2726 mtx_lock(&devq->send_mtx); 2727 cgds->dev_openings = dev->ccbq.dev_openings; 2728 cgds->dev_active = dev->ccbq.dev_active; 2729 cgds->allocated = dev->ccbq.allocated; 2730 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2731 cgds->held = cgds->allocated - cgds->dev_active - cgds->queued; 2732 cgds->last_reset = tar->last_reset; 2733 cgds->maxtags = dev->maxtags; 2734 cgds->mintags = dev->mintags; 2735 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2736 cgds->last_reset = bus->last_reset; 2737 mtx_unlock(&devq->send_mtx); 2738 cgds->ccb_h.status = CAM_REQ_CMP; 2739 break; 2740 } 2741 case XPT_GDEVLIST: 2742 { 2743 struct cam_periph *nperiph; 2744 struct periph_list *periph_head; 2745 struct ccb_getdevlist *cgdl; 2746 u_int i; 2747 struct cam_ed *device; 2748 bool found; 2749 2750 found = false; 2751 2752 /* 2753 * Don't want anyone mucking with our data. 2754 */ 2755 device = path->device; 2756 periph_head = &device->periphs; 2757 cgdl = &start_ccb->cgdl; 2758 2759 /* 2760 * Check and see if the list has changed since the user 2761 * last requested a list member. If so, tell them that the 2762 * list has changed, and therefore they need to start over 2763 * from the beginning. 2764 */ 2765 if ((cgdl->index != 0) && 2766 (cgdl->generation != device->generation)) { 2767 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2768 break; 2769 } 2770 2771 /* 2772 * Traverse the list of peripherals and attempt to find 2773 * the requested peripheral. 
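 *
 * The requester drives this one entry at a time: it starts with an
 * index and generation of zero and keeps reissuing the CCB while the
 * returned status is CAM_GDEVLIST_MORE_DEVS.  A hedged sketch of that
 * caller-side loop (the CCB setup itself is assumed, not shown):
 *
 *	cgdl->index = 0;
 *	cgdl->generation = 0;
 *	do {
 *		xpt_action(start_ccb);
 *		...record cgdl->periph_name and cgdl->unit_number...
 *	} while (cgdl->status == CAM_GDEVLIST_MORE_DEVS);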
2774 */ 2775 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2776 (nperiph != NULL) && (i <= cgdl->index); 2777 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2778 if (i == cgdl->index) { 2779 strlcpy(cgdl->periph_name, 2780 nperiph->periph_name, 2781 sizeof(cgdl->periph_name)); 2782 cgdl->unit_number = nperiph->unit_number; 2783 found = true; 2784 } 2785 } 2786 if (!found) { 2787 cgdl->status = CAM_GDEVLIST_ERROR; 2788 break; 2789 } 2790 2791 if (nperiph == NULL) 2792 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2793 else 2794 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2795 2796 cgdl->index++; 2797 cgdl->generation = device->generation; 2798 2799 cgdl->ccb_h.status = CAM_REQ_CMP; 2800 break; 2801 } 2802 case XPT_DEV_MATCH: 2803 { 2804 dev_pos_type position_type; 2805 struct ccb_dev_match *cdm; 2806 2807 cdm = &start_ccb->cdm; 2808 2809 /* 2810 * There are two ways of getting at information in the EDT. 2811 * The first way is via the primary EDT tree. It starts 2812 * with a list of buses, then a list of targets on a bus, 2813 * then devices/luns on a target, and then peripherals on a 2814 * device/lun. The "other" way is by the peripheral driver 2815 * lists. The peripheral driver lists are organized by 2816 * peripheral driver. (obviously) So it makes sense to 2817 * use the peripheral driver list if the user is looking 2818 * for something like "da1", or all "da" devices. If the 2819 * user is looking for something on a particular bus/target 2820 * or lun, it's generally better to go through the EDT tree. 2821 */ 2822 2823 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2824 position_type = cdm->pos.position_type; 2825 else { 2826 u_int i; 2827 2828 position_type = CAM_DEV_POS_NONE; 2829 2830 for (i = 0; i < cdm->num_patterns; i++) { 2831 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2832 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2833 position_type = CAM_DEV_POS_EDT; 2834 break; 2835 } 2836 } 2837 2838 if (cdm->num_patterns == 0) 2839 position_type = CAM_DEV_POS_EDT; 2840 else if (position_type == CAM_DEV_POS_NONE) 2841 position_type = CAM_DEV_POS_PDRV; 2842 } 2843 2844 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2845 case CAM_DEV_POS_EDT: 2846 xptedtmatch(cdm); 2847 break; 2848 case CAM_DEV_POS_PDRV: 2849 xptperiphlistmatch(cdm); 2850 break; 2851 default: 2852 cdm->status = CAM_DEV_MATCH_ERROR; 2853 break; 2854 } 2855 2856 if (cdm->status == CAM_DEV_MATCH_ERROR) 2857 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2858 else 2859 start_ccb->ccb_h.status = CAM_REQ_CMP; 2860 2861 break; 2862 } 2863 case XPT_SASYNC_CB: 2864 { 2865 struct ccb_setasync *csa; 2866 struct async_node *cur_entry; 2867 struct async_list *async_head; 2868 uint32_t added; 2869 2870 csa = &start_ccb->csa; 2871 added = csa->event_enable; 2872 async_head = &path->device->asyncs; 2873 2874 /* 2875 * If there is already an entry for us, simply 2876 * update it. 2877 */ 2878 cur_entry = SLIST_FIRST(async_head); 2879 while (cur_entry != NULL) { 2880 if ((cur_entry->callback_arg == csa->callback_arg) 2881 && (cur_entry->callback == csa->callback)) 2882 break; 2883 cur_entry = SLIST_NEXT(cur_entry, links); 2884 } 2885 2886 if (cur_entry != NULL) { 2887 /* 2888 * If the request has no flags set, 2889 * remove the entry. 
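 *
 * In other words, a client that registered roughly like this (a
 * sketch; foo_async and softc are made-up names, and the CCB setup is
 * what xpt_register_async() normally does on the client's behalf):
 *
 *	xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL);
 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
 *	csa.callback = foo_async;
 *	csa.callback_arg = softc;
 *	xpt_action((union ccb *)&csa);
 *
 * later deregisters by submitting the same callback/callback_arg pair
 * with event_enable set to 0, which is the case handled just below.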
2890 */ 2891 added &= ~cur_entry->event_enable; 2892 if (csa->event_enable == 0) { 2893 SLIST_REMOVE(async_head, cur_entry, 2894 async_node, links); 2895 xpt_release_device(path->device); 2896 free(cur_entry, M_CAMXPT); 2897 } else { 2898 cur_entry->event_enable = csa->event_enable; 2899 } 2900 csa->event_enable = added; 2901 } else { 2902 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2903 M_NOWAIT); 2904 if (cur_entry == NULL) { 2905 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2906 break; 2907 } 2908 cur_entry->event_enable = csa->event_enable; 2909 cur_entry->event_lock = (path->bus->sim->mtx && 2910 mtx_owned(path->bus->sim->mtx)) ? 1 : 0; 2911 cur_entry->callback_arg = csa->callback_arg; 2912 cur_entry->callback = csa->callback; 2913 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2914 xpt_acquire_device(path->device); 2915 } 2916 start_ccb->ccb_h.status = CAM_REQ_CMP; 2917 break; 2918 } 2919 case XPT_REL_SIMQ: 2920 { 2921 struct ccb_relsim *crs; 2922 struct cam_ed *dev; 2923 2924 crs = &start_ccb->crs; 2925 dev = path->device; 2926 if (dev == NULL) { 2927 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2928 break; 2929 } 2930 2931 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2932 /* Don't ever go below one opening */ 2933 if (crs->openings > 0) { 2934 xpt_dev_ccbq_resize(path, crs->openings); 2935 if (bootverbose) { 2936 xpt_print(path, 2937 "number of openings is now %d\n", 2938 crs->openings); 2939 } 2940 } 2941 } 2942 2943 mtx_lock(&dev->sim->devq->send_mtx); 2944 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2945 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2946 /* 2947 * Just extend the old timeout and decrement 2948 * the freeze count so that a single timeout 2949 * is sufficient for releasing the queue. 2950 */ 2951 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2952 callout_stop(&dev->callout); 2953 } else { 2954 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2955 } 2956 2957 callout_reset_sbt(&dev->callout, 2958 SBT_1MS * crs->release_timeout, SBT_1MS, 2959 xpt_release_devq_timeout, dev, 0); 2960 2961 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2962 } 2963 2964 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2965 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2966 /* 2967 * Decrement the freeze count so that a single 2968 * completion is still sufficient to unfreeze 2969 * the queue. 2970 */ 2971 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2972 } else { 2973 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2974 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2975 } 2976 } 2977 2978 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2979 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2980 || (dev->ccbq.dev_active == 0)) { 2981 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2982 } else { 2983 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 2984 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2985 } 2986 } 2987 mtx_unlock(&dev->sim->devq->send_mtx); 2988 2989 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 2990 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 2991 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 2992 start_ccb->ccb_h.status = CAM_REQ_CMP; 2993 break; 2994 } 2995 case XPT_DEBUG: { 2996 struct cam_path *oldpath; 2997 2998 /* Check that all request bits are supported. 
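 *
 * CAM_DEBUG_COMPILE is the mask of debug flags actually compiled into
 * this kernel (it is empty unless options CAMDEBUG / CAM_DEBUG_FLAGS
 * were used), so requesting any flag outside it fails below with
 * CAM_FUNC_NOTAVAIL rather than being silently ignored.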
*/ 2999 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3000 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3001 break; 3002 } 3003 3004 cam_dflags = CAM_DEBUG_NONE; 3005 if (cam_dpath != NULL) { 3006 oldpath = cam_dpath; 3007 cam_dpath = NULL; 3008 xpt_free_path(oldpath); 3009 } 3010 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3011 if (xpt_create_path(&cam_dpath, NULL, 3012 start_ccb->ccb_h.path_id, 3013 start_ccb->ccb_h.target_id, 3014 start_ccb->ccb_h.target_lun) != 3015 CAM_REQ_CMP) { 3016 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3017 } else { 3018 cam_dflags = start_ccb->cdbg.flags; 3019 start_ccb->ccb_h.status = CAM_REQ_CMP; 3020 xpt_print(cam_dpath, "debugging flags now %x\n", 3021 cam_dflags); 3022 } 3023 } else 3024 start_ccb->ccb_h.status = CAM_REQ_CMP; 3025 break; 3026 } 3027 case XPT_NOOP: 3028 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3029 xpt_freeze_devq(path, 1); 3030 start_ccb->ccb_h.status = CAM_REQ_CMP; 3031 break; 3032 case XPT_REPROBE_LUN: 3033 xpt_async(AC_INQ_CHANGED, path, NULL); 3034 start_ccb->ccb_h.status = CAM_REQ_CMP; 3035 xpt_done(start_ccb); 3036 break; 3037 case XPT_ASYNC: 3038 /* 3039 * Queue the async operation so it can be run from a sleepable 3040 * context. 3041 */ 3042 start_ccb->ccb_h.status = CAM_REQ_CMP; 3043 mtx_lock(&cam_async.cam_doneq_mtx); 3044 STAILQ_INSERT_TAIL(&cam_async.cam_doneq, &start_ccb->ccb_h, sim_links.stqe); 3045 start_ccb->ccb_h.pinfo.index = CAM_ASYNC_INDEX; 3046 mtx_unlock(&cam_async.cam_doneq_mtx); 3047 wakeup(&cam_async.cam_doneq); 3048 break; 3049 default: 3050 case XPT_SDEV_TYPE: 3051 case XPT_TERM_IO: 3052 case XPT_ENG_INQ: 3053 /* XXX Implement */ 3054 xpt_print(start_ccb->ccb_h.path, 3055 "%s: CCB type %#x %s not supported\n", __func__, 3056 start_ccb->ccb_h.func_code, 3057 xpt_action_name(start_ccb->ccb_h.func_code)); 3058 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3059 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3060 xpt_done(start_ccb); 3061 } 3062 break; 3063 } 3064 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3065 ("xpt_action_default: func= %#x %s status %#x\n", 3066 start_ccb->ccb_h.func_code, 3067 xpt_action_name(start_ccb->ccb_h.func_code), 3068 start_ccb->ccb_h.status)); 3069 } 3070 3071 /* 3072 * Call the sim poll routine to allow the sim to complete 3073 * any inflight requests, then call camisr_runqueue to 3074 * complete any CCB that the polling completed. 3075 */ 3076 void 3077 xpt_sim_poll(struct cam_sim *sim) 3078 { 3079 struct mtx *mtx; 3080 3081 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3082 mtx = sim->mtx; 3083 if (mtx) 3084 mtx_lock(mtx); 3085 (*(sim->sim_poll))(sim); 3086 if (mtx) 3087 mtx_unlock(mtx); 3088 camisr_runqueue(); 3089 } 3090 3091 uint32_t 3092 xpt_poll_setup(union ccb *start_ccb) 3093 { 3094 uint32_t timeout; 3095 struct cam_sim *sim; 3096 struct cam_devq *devq; 3097 struct cam_ed *dev; 3098 3099 timeout = start_ccb->ccb_h.timeout * 10; 3100 sim = start_ccb->ccb_h.path->bus->sim; 3101 devq = sim->devq; 3102 dev = start_ccb->ccb_h.path->device; 3103 3104 KASSERT(cam_sim_pollable(sim), ("%s: non-pollable sim", __func__)); 3105 3106 /* 3107 * Steal an opening so that no other queued requests 3108 * can get it before us while we simulate interrupts. 
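 *
 * Callers pair this with xpt_pollwait(): when xpt_poll_setup() returns
 * a non-zero remaining timeout the CCB is issued with xpt_action() and
 * then polled to completion, roughly as cam_periph_runccb() does while
 * dumping.  A hedged sketch of that pairing:
 *
 *	timeout = xpt_poll_setup(ccb);
 *	if (timeout > 0) {
 *		xpt_action(ccb);
 *		xpt_pollwait(ccb, timeout);
 *	} else
 *		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;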
3109 */ 3110 mtx_lock(&devq->send_mtx); 3111 dev->ccbq.dev_openings--; 3112 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3113 (--timeout > 0)) { 3114 mtx_unlock(&devq->send_mtx); 3115 DELAY(100); 3116 xpt_sim_poll(sim); 3117 mtx_lock(&devq->send_mtx); 3118 } 3119 dev->ccbq.dev_openings++; 3120 mtx_unlock(&devq->send_mtx); 3121 3122 return (timeout); 3123 } 3124 3125 void 3126 xpt_pollwait(union ccb *start_ccb, uint32_t timeout) 3127 { 3128 3129 KASSERT(cam_sim_pollable(start_ccb->ccb_h.path->bus->sim), 3130 ("%s: non-pollable sim", __func__)); 3131 while (--timeout > 0) { 3132 xpt_sim_poll(start_ccb->ccb_h.path->bus->sim); 3133 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3134 != CAM_REQ_INPROG) 3135 break; 3136 DELAY(100); 3137 } 3138 3139 if (timeout == 0) { 3140 /* 3141 * XXX Is it worth adding a sim_timeout entry 3142 * point so we can attempt recovery? If 3143 * this is only used for dumps, I don't think 3144 * it is. 3145 */ 3146 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3147 } 3148 } 3149 3150 /* 3151 * Schedule a peripheral driver to receive a ccb when its 3152 * target device has space for more transactions. 3153 */ 3154 void 3155 xpt_schedule(struct cam_periph *periph, uint32_t new_priority) 3156 { 3157 3158 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3159 cam_periph_assert(periph, MA_OWNED); 3160 if (new_priority < periph->scheduled_priority) { 3161 periph->scheduled_priority = new_priority; 3162 xpt_run_allocq(periph, 0); 3163 } 3164 } 3165 3166 /* 3167 * Schedule a device to run on a given queue. 3168 * If the device was inserted as a new entry on the queue, 3169 * return 1 meaning the device queue should be run. If we 3170 * were already queued, implying someone else has already 3171 * started the queue, return 0 so the caller doesn't attempt 3172 * to run the queue. 3173 */ 3174 static int 3175 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3176 uint32_t new_priority) 3177 { 3178 int retval; 3179 uint32_t old_priority; 3180 3181 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3182 3183 old_priority = pinfo->priority; 3184 3185 /* 3186 * Are we already queued? 
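 *
 * Keep in mind that CAM priorities are "smaller number wins":
 * new_priority is an improvement only when it is numerically lower
 * than what is already recorded, which is why both branches below
 * compare with '<'.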
3187 */ 3188 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3189 /* Simply reorder based on new priority */ 3190 if (new_priority < old_priority) { 3191 camq_change_priority(queue, pinfo->index, 3192 new_priority); 3193 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3194 ("changed priority to %d\n", 3195 new_priority)); 3196 retval = 1; 3197 } else 3198 retval = 0; 3199 } else { 3200 /* New entry on the queue */ 3201 if (new_priority < old_priority) 3202 pinfo->priority = new_priority; 3203 3204 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3205 ("Inserting onto queue\n")); 3206 pinfo->generation = ++queue->generation; 3207 camq_insert(queue, pinfo); 3208 retval = 1; 3209 } 3210 return (retval); 3211 } 3212 3213 static void 3214 xpt_run_allocq_task(void *context, int pending) 3215 { 3216 struct cam_periph *periph = context; 3217 3218 cam_periph_lock(periph); 3219 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3220 xpt_run_allocq(periph, 1); 3221 cam_periph_unlock(periph); 3222 cam_periph_release(periph); 3223 } 3224 3225 static void 3226 xpt_run_allocq(struct cam_periph *periph, int sleep) 3227 { 3228 struct cam_ed *device; 3229 union ccb *ccb; 3230 uint32_t prio; 3231 3232 cam_periph_assert(periph, MA_OWNED); 3233 if (periph->periph_allocating) 3234 return; 3235 cam_periph_doacquire(periph); 3236 periph->periph_allocating = 1; 3237 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3238 device = periph->path->device; 3239 ccb = NULL; 3240 restart: 3241 while ((prio = min(periph->scheduled_priority, 3242 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3243 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3244 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3245 if (ccb == NULL && 3246 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3247 if (sleep) { 3248 ccb = xpt_get_ccb(periph); 3249 goto restart; 3250 } 3251 if (periph->flags & CAM_PERIPH_RUN_TASK) 3252 break; 3253 cam_periph_doacquire(periph); 3254 periph->flags |= CAM_PERIPH_RUN_TASK; 3255 taskqueue_enqueue(xsoftc.xpt_taskq, 3256 &periph->periph_run_task); 3257 break; 3258 } 3259 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3260 if (prio == periph->immediate_priority) { 3261 periph->immediate_priority = CAM_PRIORITY_NONE; 3262 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3263 ("waking cam_periph_getccb()\n")); 3264 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3265 periph_links.sle); 3266 wakeup(&periph->ccb_list); 3267 } else { 3268 periph->scheduled_priority = CAM_PRIORITY_NONE; 3269 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3270 ("calling periph_start()\n")); 3271 periph->periph_start(periph, ccb); 3272 } 3273 ccb = NULL; 3274 } 3275 if (ccb != NULL) 3276 xpt_release_ccb(ccb); 3277 periph->periph_allocating = 0; 3278 cam_periph_release_locked(periph); 3279 } 3280 3281 static void 3282 xpt_run_devq(struct cam_devq *devq) 3283 { 3284 struct mtx *mtx; 3285 3286 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3287 3288 devq->send_queue.qfrozen_cnt++; 3289 while ((devq->send_queue.entries > 0) 3290 && (devq->send_openings > 0) 3291 && (devq->send_queue.qfrozen_cnt <= 1)) { 3292 struct cam_ed *device; 3293 union ccb *work_ccb; 3294 struct cam_sim *sim; 3295 struct xpt_proto *proto; 3296 3297 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3298 CAMQ_HEAD); 3299 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3300 ("running device %p\n", device)); 3301 3302 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3303 if (work_ccb == NULL) { 3304 printf("device on run queue with no ccbs???\n"); 3305 continue; 3306 } 3307 3308 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 
0) { 3309 mtx_lock(&xsoftc.xpt_highpower_lock); 3310 if (xsoftc.num_highpower <= 0) { 3311 /* 3312 * We got a high power command, but we 3313 * don't have any available slots. Freeze 3314 * the device queue until we have a slot 3315 * available. 3316 */ 3317 xpt_freeze_devq_device(device, 1); 3318 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3319 highpowerq_entry); 3320 3321 mtx_unlock(&xsoftc.xpt_highpower_lock); 3322 continue; 3323 } else { 3324 /* 3325 * Consume a high power slot while 3326 * this ccb runs. 3327 */ 3328 xsoftc.num_highpower--; 3329 } 3330 mtx_unlock(&xsoftc.xpt_highpower_lock); 3331 } 3332 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3333 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3334 devq->send_openings--; 3335 devq->send_active++; 3336 xpt_schedule_devq(devq, device); 3337 mtx_unlock(&devq->send_mtx); 3338 3339 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3340 /* 3341 * The client wants to freeze the queue 3342 * after this CCB is sent. 3343 */ 3344 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3345 } 3346 3347 /* In Target mode, the peripheral driver knows best... */ 3348 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3349 if ((device->inq_flags & SID_CmdQue) != 0 3350 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3351 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3352 else 3353 /* 3354 * Clear this in case of a retried CCB that 3355 * failed due to a rejected tag. 3356 */ 3357 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3358 } 3359 3360 KASSERT(device == work_ccb->ccb_h.path->device, 3361 ("device (%p) / path->device (%p) mismatch", 3362 device, work_ccb->ccb_h.path->device)); 3363 proto = xpt_proto_find(device->protocol); 3364 if (proto && proto->ops->debug_out) 3365 proto->ops->debug_out(work_ccb); 3366 3367 /* 3368 * Device queues can be shared among multiple SIM instances 3369 * that reside on different buses. Use the SIM from the 3370 * queued device, rather than the one from the calling bus. 3371 */ 3372 sim = device->sim; 3373 mtx = sim->mtx; 3374 if (mtx && !mtx_owned(mtx)) 3375 mtx_lock(mtx); 3376 else 3377 mtx = NULL; 3378 work_ccb->ccb_h.qos.periph_data = cam_iosched_now(); 3379 (*(sim->sim_action))(sim, work_ccb); 3380 if (mtx) 3381 mtx_unlock(mtx); 3382 mtx_lock(&devq->send_mtx); 3383 } 3384 devq->send_queue.qfrozen_cnt--; 3385 } 3386 3387 /* 3388 * This function merges stuff from the src ccb into the dst ccb, while keeping 3389 * important fields in the dst ccb constant. 3390 */ 3391 void 3392 xpt_merge_ccb(union ccb *dst_ccb, union ccb *src_ccb) 3393 { 3394 3395 /* 3396 * Pull fields that are valid for peripheral drivers to set 3397 * into the dst CCB along with the CCB "payload". 
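 *
 * The typical caller (the pass(4) ioctl path, for instance) first
 * initializes dst_ccb with xpt_setup_ccb() so that its path and queue
 * bookkeeping are valid, and only then merges a caller-supplied CCB on
 * top of it; any header field not explicitly copied below is
 * deliberately left alone.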
3398 */ 3399 dst_ccb->ccb_h.retry_count = src_ccb->ccb_h.retry_count; 3400 dst_ccb->ccb_h.func_code = src_ccb->ccb_h.func_code; 3401 dst_ccb->ccb_h.timeout = src_ccb->ccb_h.timeout; 3402 dst_ccb->ccb_h.flags = src_ccb->ccb_h.flags; 3403 bcopy(&(&src_ccb->ccb_h)[1], &(&dst_ccb->ccb_h)[1], 3404 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3405 } 3406 3407 void 3408 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3409 uint32_t priority, uint32_t flags) 3410 { 3411 3412 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3413 ccb_h->pinfo.priority = priority; 3414 ccb_h->path = path; 3415 ccb_h->path_id = path->bus->path_id; 3416 if (path->target) 3417 ccb_h->target_id = path->target->target_id; 3418 else 3419 ccb_h->target_id = CAM_TARGET_WILDCARD; 3420 if (path->device) { 3421 ccb_h->target_lun = path->device->lun_id; 3422 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3423 } else { 3424 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3425 } 3426 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3427 ccb_h->flags = flags; 3428 ccb_h->xflags = 0; 3429 } 3430 3431 void 3432 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, uint32_t priority) 3433 { 3434 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3435 } 3436 3437 /* Path manipulation functions */ 3438 cam_status 3439 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3440 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3441 { 3442 struct cam_path *path; 3443 cam_status status; 3444 3445 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3446 3447 if (path == NULL) { 3448 status = CAM_RESRC_UNAVAIL; 3449 return(status); 3450 } 3451 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3452 if (status != CAM_REQ_CMP) { 3453 free(path, M_CAMPATH); 3454 path = NULL; 3455 } 3456 *new_path_ptr = path; 3457 return (status); 3458 } 3459 3460 cam_status 3461 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3462 struct cam_periph *periph, path_id_t path_id, 3463 target_id_t target_id, lun_id_t lun_id) 3464 { 3465 3466 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3467 lun_id)); 3468 } 3469 3470 cam_status 3471 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3472 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3473 { 3474 struct cam_eb *bus; 3475 struct cam_et *target; 3476 struct cam_ed *device; 3477 cam_status status; 3478 3479 status = CAM_REQ_CMP; /* Completed without error */ 3480 target = NULL; /* Wildcarded */ 3481 device = NULL; /* Wildcarded */ 3482 3483 /* 3484 * We will potentially modify the EDT, so block interrupts 3485 * that may attempt to create cam paths. 
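 *
 * (That serialization is provided today by the topology lock and the
 * per-bus eb_mtx taken below rather than by literally blocking
 * interrupts.)
 *
 * Callers either embed a struct cam_path and pair xpt_compile_path()
 * with xpt_release_path(), as xptsetasyncfunc() does above, or let
 * xpt_create_path()/xpt_free_path() manage the allocation for them.
 * An illustrative sketch of the latter:
 *
 *	struct cam_path *path;
 *
 *	if (xpt_create_path(&path, NULL, path_id, target_id, lun_id) !=
 *	    CAM_REQ_CMP)
 *		return (ENOMEM);
 *	...use the path...
 *	xpt_free_path(path);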
3486 */ 3487 bus = xpt_find_bus(path_id); 3488 if (bus == NULL) { 3489 status = CAM_PATH_INVALID; 3490 } else { 3491 xpt_lock_buses(); 3492 mtx_lock(&bus->eb_mtx); 3493 target = xpt_find_target(bus, target_id); 3494 if (target == NULL) { 3495 /* Create one */ 3496 struct cam_et *new_target; 3497 3498 new_target = xpt_alloc_target(bus, target_id); 3499 if (new_target == NULL) { 3500 status = CAM_RESRC_UNAVAIL; 3501 } else { 3502 target = new_target; 3503 } 3504 } 3505 xpt_unlock_buses(); 3506 if (target != NULL) { 3507 device = xpt_find_device(target, lun_id); 3508 if (device == NULL) { 3509 /* Create one */ 3510 struct cam_ed *new_device; 3511 3512 new_device = 3513 (*(bus->xport->ops->alloc_device))(bus, 3514 target, 3515 lun_id); 3516 if (new_device == NULL) { 3517 status = CAM_RESRC_UNAVAIL; 3518 } else { 3519 device = new_device; 3520 } 3521 } 3522 } 3523 mtx_unlock(&bus->eb_mtx); 3524 } 3525 3526 /* 3527 * Only touch the user's data if we are successful. 3528 */ 3529 if (status == CAM_REQ_CMP) { 3530 new_path->periph = perph; 3531 new_path->bus = bus; 3532 new_path->target = target; 3533 new_path->device = device; 3534 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3535 } else { 3536 if (device != NULL) 3537 xpt_release_device(device); 3538 if (target != NULL) 3539 xpt_release_target(target); 3540 if (bus != NULL) 3541 xpt_release_bus(bus); 3542 } 3543 return (status); 3544 } 3545 3546 int 3547 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3548 { 3549 struct cam_path *new_path; 3550 3551 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3552 if (new_path == NULL) 3553 return (ENOMEM); 3554 *new_path = *path; 3555 if (path->bus != NULL) 3556 xpt_acquire_bus(path->bus); 3557 if (path->target != NULL) 3558 xpt_acquire_target(path->target); 3559 if (path->device != NULL) 3560 xpt_acquire_device(path->device); 3561 *new_path_ptr = new_path; 3562 return (0); 3563 } 3564 3565 void 3566 xpt_release_path(struct cam_path *path) 3567 { 3568 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3569 if (path->device != NULL) { 3570 xpt_release_device(path->device); 3571 path->device = NULL; 3572 } 3573 if (path->target != NULL) { 3574 xpt_release_target(path->target); 3575 path->target = NULL; 3576 } 3577 if (path->bus != NULL) { 3578 xpt_release_bus(path->bus); 3579 path->bus = NULL; 3580 } 3581 } 3582 3583 void 3584 xpt_free_path(struct cam_path *path) 3585 { 3586 3587 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3588 xpt_release_path(path); 3589 free(path, M_CAMPATH); 3590 } 3591 3592 void 3593 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3594 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3595 { 3596 3597 xpt_lock_buses(); 3598 if (bus_ref) { 3599 if (path->bus) 3600 *bus_ref = path->bus->refcount; 3601 else 3602 *bus_ref = 0; 3603 } 3604 if (periph_ref) { 3605 if (path->periph) 3606 *periph_ref = path->periph->refcount; 3607 else 3608 *periph_ref = 0; 3609 } 3610 xpt_unlock_buses(); 3611 if (target_ref) { 3612 if (path->target) 3613 *target_ref = path->target->refcount; 3614 else 3615 *target_ref = 0; 3616 } 3617 if (device_ref) { 3618 if (path->device) 3619 *device_ref = path->device->refcount; 3620 else 3621 *device_ref = 0; 3622 } 3623 } 3624 3625 /* 3626 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3627 * in path1, 2 for match with wildcards in path2. 
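 *
 * For example, comparing a path that wildcards its target and lun
 * against a fully specified path on the same bus yields 1 (the
 * wildcards are in path1); with the arguments swapped it yields 2;
 * and two fully specified paths that simply differ yield -1.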
3628 */ 3629 int 3630 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3631 { 3632 int retval = 0; 3633 3634 if (path1->bus != path2->bus) { 3635 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3636 retval = 1; 3637 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3638 retval = 2; 3639 else 3640 return (-1); 3641 } 3642 if (path1->target != path2->target) { 3643 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3644 if (retval == 0) 3645 retval = 1; 3646 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3647 retval = 2; 3648 else 3649 return (-1); 3650 } 3651 if (path1->device != path2->device) { 3652 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3653 if (retval == 0) 3654 retval = 1; 3655 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3656 retval = 2; 3657 else 3658 return (-1); 3659 } 3660 return (retval); 3661 } 3662 3663 int 3664 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3665 { 3666 int retval = 0; 3667 3668 if (path->bus != dev->target->bus) { 3669 if (path->bus->path_id == CAM_BUS_WILDCARD) 3670 retval = 1; 3671 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3672 retval = 2; 3673 else 3674 return (-1); 3675 } 3676 if (path->target != dev->target) { 3677 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3678 if (retval == 0) 3679 retval = 1; 3680 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3681 retval = 2; 3682 else 3683 return (-1); 3684 } 3685 if (path->device != dev) { 3686 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3687 if (retval == 0) 3688 retval = 1; 3689 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3690 retval = 2; 3691 else 3692 return (-1); 3693 } 3694 return (retval); 3695 } 3696 3697 void 3698 xpt_print_path(struct cam_path *path) 3699 { 3700 struct sbuf sb; 3701 char buffer[XPT_PRINT_LEN]; 3702 3703 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3704 xpt_path_sbuf(path, &sb); 3705 sbuf_finish(&sb); 3706 printf("%s", sbuf_data(&sb)); 3707 sbuf_delete(&sb); 3708 } 3709 3710 static void 3711 xpt_device_sbuf(struct cam_ed *device, struct sbuf *sb) 3712 { 3713 if (device == NULL) 3714 sbuf_cat(sb, "(nopath): "); 3715 else { 3716 sbuf_printf(sb, "(noperiph:%s%d:%d:%d:%jx): ", 3717 device->sim->sim_name, 3718 device->sim->unit_number, 3719 device->sim->bus_id, 3720 device->target->target_id, 3721 (uintmax_t)device->lun_id); 3722 } 3723 } 3724 3725 void 3726 xpt_print(struct cam_path *path, const char *fmt, ...) 
3727 { 3728 va_list ap; 3729 struct sbuf sb; 3730 char buffer[XPT_PRINT_LEN]; 3731 3732 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3733 3734 xpt_path_sbuf(path, &sb); 3735 va_start(ap, fmt); 3736 sbuf_vprintf(&sb, fmt, ap); 3737 va_end(ap); 3738 3739 sbuf_finish(&sb); 3740 printf("%s", sbuf_data(&sb)); 3741 sbuf_delete(&sb); 3742 } 3743 3744 char * 3745 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3746 { 3747 struct sbuf sb; 3748 3749 sbuf_new(&sb, str, str_len, 0); 3750 xpt_path_sbuf(path, &sb); 3751 sbuf_finish(&sb); 3752 return (str); 3753 } 3754 3755 void 3756 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) 3757 { 3758 3759 if (path == NULL) 3760 sbuf_cat(sb, "(nopath): "); 3761 else { 3762 if (path->periph != NULL) 3763 sbuf_printf(sb, "(%s%d:", path->periph->periph_name, 3764 path->periph->unit_number); 3765 else 3766 sbuf_cat(sb, "(noperiph:"); 3767 3768 if (path->bus != NULL) 3769 sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, 3770 path->bus->sim->unit_number, 3771 path->bus->sim->bus_id); 3772 else 3773 sbuf_cat(sb, "nobus:"); 3774 3775 if (path->target != NULL) 3776 sbuf_printf(sb, "%d:", path->target->target_id); 3777 else 3778 sbuf_cat(sb, "X:"); 3779 3780 if (path->device != NULL) 3781 sbuf_printf(sb, "%jx): ", 3782 (uintmax_t)path->device->lun_id); 3783 else 3784 sbuf_cat(sb, "X): "); 3785 } 3786 } 3787 3788 path_id_t 3789 xpt_path_path_id(struct cam_path *path) 3790 { 3791 return(path->bus->path_id); 3792 } 3793 3794 target_id_t 3795 xpt_path_target_id(struct cam_path *path) 3796 { 3797 if (path->target != NULL) 3798 return (path->target->target_id); 3799 else 3800 return (CAM_TARGET_WILDCARD); 3801 } 3802 3803 lun_id_t 3804 xpt_path_lun_id(struct cam_path *path) 3805 { 3806 if (path->device != NULL) 3807 return (path->device->lun_id); 3808 else 3809 return (CAM_LUN_WILDCARD); 3810 } 3811 3812 struct cam_sim * 3813 xpt_path_sim(struct cam_path *path) 3814 { 3815 3816 return (path->bus->sim); 3817 } 3818 3819 struct cam_periph* 3820 xpt_path_periph(struct cam_path *path) 3821 { 3822 3823 return (path->periph); 3824 } 3825 3826 /* 3827 * Release a CAM control block for the caller. Remit the cost of the structure 3828 * to the device referenced by the path. If the this device had no 'credits' 3829 * and peripheral drivers have registered async callbacks for this notification 3830 * call them now. 3831 */ 3832 void 3833 xpt_release_ccb(union ccb *free_ccb) 3834 { 3835 struct cam_ed *device; 3836 struct cam_periph *periph; 3837 3838 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3839 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3840 device = free_ccb->ccb_h.path->device; 3841 periph = free_ccb->ccb_h.path->periph; 3842 3843 xpt_free_ccb(free_ccb); 3844 periph->periph_allocated--; 3845 cam_ccbq_release_opening(&device->ccbq); 3846 xpt_run_allocq(periph, 0); 3847 } 3848 3849 /* Functions accessed by SIM drivers */ 3850 3851 static struct xpt_xport_ops xport_default_ops = { 3852 .alloc_device = xpt_alloc_device_default, 3853 .action = xpt_action_default, 3854 .async = xpt_dev_async_default, 3855 }; 3856 static struct xpt_xport xport_default = { 3857 .xport = XPORT_UNKNOWN, 3858 .name = "unknown", 3859 .ops = &xport_default_ops, 3860 }; 3861 3862 CAM_XPT_XPORT(xport_default); 3863 3864 /* 3865 * A sim structure, listing the SIM entry points and instance 3866 * identification info is passed to xpt_bus_register to hook the SIM 3867 * into the CAM framework. 
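 *
 * A typical SIM driver attach does, roughly (a sketch only; the "foo"
 * names, the transaction count and the lock are placeholders owned by
 * the driver):
 *
 *	devq = cam_simq_alloc(MAX_TRANSACTIONS);
 *	sim = cam_sim_alloc(foo_action, foo_poll, "foo", softc,
 *	    device_get_unit(dev), &softc->lock, 1, MAX_TRANSACTIONS,
 *	    devq);
 *	mtx_lock(&softc->lock);
 *	error = xpt_bus_register(sim, dev, 0);
 *	mtx_unlock(&softc->lock);
 *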
xpt_bus_register creates a cam_eb entry 3868 * for this new bus and places it in the array of buses and assigns 3869 * it a path_id. The path_id may be influenced by "hard wiring" 3870 * information specified by the user. Once interrupt services are 3871 * available, the bus will be probed. 3872 */ 3873 int 3874 xpt_bus_register(struct cam_sim *sim, device_t parent, uint32_t bus) 3875 { 3876 struct cam_eb *new_bus; 3877 struct cam_eb *old_bus; 3878 struct ccb_pathinq cpi; 3879 struct cam_path *path; 3880 cam_status status; 3881 3882 sim->bus_id = bus; 3883 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3884 M_CAMXPT, M_NOWAIT|M_ZERO); 3885 if (new_bus == NULL) { 3886 /* Couldn't satisfy request */ 3887 return (ENOMEM); 3888 } 3889 3890 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3891 TAILQ_INIT(&new_bus->et_entries); 3892 cam_sim_hold(sim); 3893 new_bus->sim = sim; 3894 timevalclear(&new_bus->last_reset); 3895 new_bus->flags = 0; 3896 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3897 new_bus->generation = 0; 3898 new_bus->parent_dev = parent; 3899 3900 xpt_lock_buses(); 3901 sim->path_id = new_bus->path_id = 3902 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3903 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3904 while (old_bus != NULL 3905 && old_bus->path_id < new_bus->path_id) 3906 old_bus = TAILQ_NEXT(old_bus, links); 3907 if (old_bus != NULL) 3908 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3909 else 3910 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3911 xsoftc.bus_generation++; 3912 xpt_unlock_buses(); 3913 3914 /* 3915 * Set a default transport so that a PATH_INQ can be issued to 3916 * the SIM. This will then allow for probing and attaching of 3917 * a more appropriate transport. 3918 */ 3919 new_bus->xport = &xport_default; 3920 3921 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3922 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3923 if (status != CAM_REQ_CMP) { 3924 xpt_release_bus(new_bus); 3925 return (ENOMEM); 3926 } 3927 3928 xpt_path_inq(&cpi, path); 3929 3930 /* 3931 * Use the results of PATH_INQ to pick a transport. Note that 3932 * the xpt bus (which uses XPORT_UNSPECIFIED) always uses 3933 * xport_default instead of a transport from 3934 * cam_xpt_port_set. 3935 */ 3936 if (cam_ccb_success((union ccb *)&cpi) && 3937 cpi.transport != XPORT_UNSPECIFIED) { 3938 struct xpt_xport **xpt; 3939 3940 SET_FOREACH(xpt, cam_xpt_xport_set) { 3941 if ((*xpt)->xport == cpi.transport) { 3942 new_bus->xport = *xpt; 3943 break; 3944 } 3945 } 3946 if (new_bus->xport == &xport_default) { 3947 xpt_print(path, 3948 "No transport found for %d\n", cpi.transport); 3949 xpt_release_bus(new_bus); 3950 xpt_free_path(path); 3951 return (EINVAL); 3952 } 3953 } 3954 3955 /* Notify interested parties */ 3956 if (sim->path_id != CAM_XPT_PATH_ID) { 3957 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3958 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3959 union ccb *scan_ccb; 3960 3961 /* Initiate bus rescan. 
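 *
 * On success the scan CCB, and with it this path, is handed off to
 * xpt_rescan(); the path is freed here only on the branches that do
 * not hand it off.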
*/ 3962 scan_ccb = xpt_alloc_ccb_nowait(); 3963 if (scan_ccb != NULL) { 3964 scan_ccb->ccb_h.path = path; 3965 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3966 scan_ccb->crcn.flags = 0; 3967 xpt_rescan(scan_ccb); 3968 } else { 3969 xpt_print(path, 3970 "Can't allocate CCB to scan bus\n"); 3971 xpt_free_path(path); 3972 } 3973 } else 3974 xpt_free_path(path); 3975 } else 3976 xpt_free_path(path); 3977 return (CAM_SUCCESS); 3978 } 3979 3980 int 3981 xpt_bus_deregister(path_id_t pathid) 3982 { 3983 struct cam_path bus_path; 3984 cam_status status; 3985 3986 status = xpt_compile_path(&bus_path, NULL, pathid, 3987 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3988 if (status != CAM_REQ_CMP) 3989 return (ENOMEM); 3990 3991 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3992 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3993 3994 /* Release the reference count held while registered. */ 3995 xpt_release_bus(bus_path.bus); 3996 xpt_release_path(&bus_path); 3997 3998 return (CAM_SUCCESS); 3999 } 4000 4001 static path_id_t 4002 xptnextfreepathid(void) 4003 { 4004 struct cam_eb *bus; 4005 path_id_t pathid; 4006 const char *strval; 4007 4008 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4009 pathid = 0; 4010 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4011 retry: 4012 /* Find an unoccupied pathid */ 4013 while (bus != NULL && bus->path_id <= pathid) { 4014 if (bus->path_id == pathid) 4015 pathid++; 4016 bus = TAILQ_NEXT(bus, links); 4017 } 4018 4019 /* 4020 * Ensure that this pathid is not reserved for 4021 * a bus that may be registered in the future. 4022 */ 4023 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4024 ++pathid; 4025 /* Start the search over */ 4026 goto retry; 4027 } 4028 return (pathid); 4029 } 4030 4031 static path_id_t 4032 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4033 { 4034 path_id_t pathid; 4035 int i, dunit, val; 4036 char buf[32]; 4037 const char *dname; 4038 4039 pathid = CAM_XPT_PATH_ID; 4040 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4041 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4042 return (pathid); 4043 i = 0; 4044 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4045 if (strcmp(dname, "scbus")) { 4046 /* Avoid a bit of foot shooting. */ 4047 continue; 4048 } 4049 if (dunit < 0) /* unwired?! */ 4050 continue; 4051 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4052 if (sim_bus == val) { 4053 pathid = dunit; 4054 break; 4055 } 4056 } else if (sim_bus == 0) { 4057 /* Unspecified matches bus 0 */ 4058 pathid = dunit; 4059 break; 4060 } else { 4061 printf("Ambiguous scbus configuration for %s%d " 4062 "bus %d, cannot wire down. 
The kernel " 4063 "config entry for scbus%d should " 4064 "specify a controller bus.\n" 4065 "Scbus will be assigned dynamically.\n", 4066 sim_name, sim_unit, sim_bus, dunit); 4067 break; 4068 } 4069 } 4070 4071 if (pathid == CAM_XPT_PATH_ID) 4072 pathid = xptnextfreepathid(); 4073 return (pathid); 4074 } 4075 4076 static const char * 4077 xpt_async_string(uint32_t async_code) 4078 { 4079 4080 switch (async_code) { 4081 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4082 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4083 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4084 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4085 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4086 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4087 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4088 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4089 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4090 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4091 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4092 case AC_CONTRACT: return ("AC_CONTRACT"); 4093 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4094 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4095 } 4096 return ("AC_UNKNOWN"); 4097 } 4098 4099 static int 4100 xpt_async_size(uint32_t async_code) 4101 { 4102 4103 switch (async_code) { 4104 case AC_BUS_RESET: return (0); 4105 case AC_UNSOL_RESEL: return (0); 4106 case AC_SCSI_AEN: return (0); 4107 case AC_SENT_BDR: return (0); 4108 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4109 case AC_PATH_DEREGISTERED: return (0); 4110 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4111 case AC_LOST_DEVICE: return (0); 4112 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4113 case AC_INQ_CHANGED: return (0); 4114 case AC_GETDEV_CHANGED: return (0); 4115 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4116 case AC_ADVINFO_CHANGED: return (-1); 4117 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4118 } 4119 return (0); 4120 } 4121 4122 static int 4123 xpt_async_process_dev(struct cam_ed *device, void *arg) 4124 { 4125 union ccb *ccb = arg; 4126 struct cam_path *path = ccb->ccb_h.path; 4127 void *async_arg = ccb->casync.async_arg_ptr; 4128 uint32_t async_code = ccb->casync.async_code; 4129 bool relock; 4130 4131 if (path->device != device 4132 && path->device->lun_id != CAM_LUN_WILDCARD 4133 && device->lun_id != CAM_LUN_WILDCARD) 4134 return (1); 4135 4136 /* 4137 * The async callback could free the device. 4138 * If it is a broadcast async, it doesn't hold 4139 * device reference, so take our own reference. 4140 */ 4141 xpt_acquire_device(device); 4142 4143 /* 4144 * If async for specific device is to be delivered to 4145 * the wildcard client, take the specific device lock. 4146 * XXX: We may need a way for client to specify it. 
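 * (The device's own lock is dropped and the wildcard path's lock is
 * taken around the callbacks below, then the original lock is retaken.)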
4147 */ 4148 if ((device->lun_id == CAM_LUN_WILDCARD && 4149 path->device->lun_id != CAM_LUN_WILDCARD) || 4150 (device->target->target_id == CAM_TARGET_WILDCARD && 4151 path->target->target_id != CAM_TARGET_WILDCARD) || 4152 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4153 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4154 mtx_unlock(&device->device_mtx); 4155 xpt_path_lock(path); 4156 relock = true; 4157 } else 4158 relock = false; 4159 4160 (*(device->target->bus->xport->ops->async))(async_code, 4161 device->target->bus, device->target, device, async_arg); 4162 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4163 4164 if (relock) { 4165 xpt_path_unlock(path); 4166 mtx_lock(&device->device_mtx); 4167 } 4168 xpt_release_device(device); 4169 return (1); 4170 } 4171 4172 static int 4173 xpt_async_process_tgt(struct cam_et *target, void *arg) 4174 { 4175 union ccb *ccb = arg; 4176 struct cam_path *path = ccb->ccb_h.path; 4177 4178 if (path->target != target 4179 && path->target->target_id != CAM_TARGET_WILDCARD 4180 && target->target_id != CAM_TARGET_WILDCARD) 4181 return (1); 4182 4183 if (ccb->casync.async_code == AC_SENT_BDR) { 4184 /* Update our notion of when the last reset occurred */ 4185 microtime(&target->last_reset); 4186 } 4187 4188 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4189 } 4190 4191 static void 4192 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4193 { 4194 struct cam_eb *bus; 4195 struct cam_path *path; 4196 void *async_arg; 4197 uint32_t async_code; 4198 4199 path = ccb->ccb_h.path; 4200 async_code = ccb->casync.async_code; 4201 async_arg = ccb->casync.async_arg_ptr; 4202 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4203 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4204 bus = path->bus; 4205 4206 if (async_code == AC_BUS_RESET) { 4207 /* Update our notion of when the last reset occurred */ 4208 microtime(&bus->last_reset); 4209 } 4210 4211 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4212 4213 /* 4214 * If this wasn't a fully wildcarded async, tell all 4215 * clients that want all async events. 4216 */ 4217 if (bus != xpt_periph->path->bus) { 4218 xpt_path_lock(xpt_periph->path); 4219 xpt_async_process_dev(xpt_periph->path->device, ccb); 4220 xpt_path_unlock(xpt_periph->path); 4221 } 4222 4223 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4224 xpt_release_devq(path, 1, TRUE); 4225 else 4226 xpt_release_simq(path->bus->sim, TRUE); 4227 if (ccb->casync.async_arg_size > 0) 4228 free(async_arg, M_CAMXPT); 4229 xpt_free_path(path); 4230 xpt_free_ccb(ccb); 4231 } 4232 4233 static void 4234 xpt_async_bcast(struct async_list *async_head, 4235 uint32_t async_code, 4236 struct cam_path *path, void *async_arg) 4237 { 4238 struct async_node *cur_entry; 4239 struct mtx *mtx; 4240 4241 cur_entry = SLIST_FIRST(async_head); 4242 while (cur_entry != NULL) { 4243 struct async_node *next_entry; 4244 /* 4245 * Grab the next list entry before we call the current 4246 * entry's callback. This is because the callback function 4247 * can delete its async callback entry. 4248 */ 4249 next_entry = SLIST_NEXT(cur_entry, links); 4250 if ((cur_entry->event_enable & async_code) != 0) { 4251 mtx = cur_entry->event_lock ? 
4252 path->device->sim->mtx : NULL; 4253 if (mtx) 4254 mtx_lock(mtx); 4255 cur_entry->callback(cur_entry->callback_arg, 4256 async_code, path, 4257 async_arg); 4258 if (mtx) 4259 mtx_unlock(mtx); 4260 } 4261 cur_entry = next_entry; 4262 } 4263 } 4264 4265 void 4266 xpt_async(uint32_t async_code, struct cam_path *path, void *async_arg) 4267 { 4268 union ccb *ccb; 4269 int size; 4270 4271 ccb = xpt_alloc_ccb_nowait(); 4272 if (ccb == NULL) { 4273 xpt_print(path, "Can't allocate CCB to send %s\n", 4274 xpt_async_string(async_code)); 4275 return; 4276 } 4277 4278 if (xpt_clone_path(&ccb->ccb_h.path, path) != 0) { 4279 xpt_print(path, "Can't allocate path to send %s\n", 4280 xpt_async_string(async_code)); 4281 xpt_free_ccb(ccb); 4282 return; 4283 } 4284 ccb->ccb_h.path->periph = NULL; 4285 ccb->ccb_h.func_code = XPT_ASYNC; 4286 ccb->ccb_h.cbfcnp = xpt_async_process; 4287 ccb->ccb_h.flags |= CAM_UNLOCKED; 4288 ccb->casync.async_code = async_code; 4289 ccb->casync.async_arg_size = 0; 4290 size = xpt_async_size(async_code); 4291 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4292 ("xpt_async: func %#x %s async_code %d %s\n", 4293 ccb->ccb_h.func_code, 4294 xpt_action_name(ccb->ccb_h.func_code), 4295 async_code, 4296 xpt_async_string(async_code))); 4297 if (size > 0 && async_arg != NULL) { 4298 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4299 if (ccb->casync.async_arg_ptr == NULL) { 4300 xpt_print(path, "Can't allocate argument to send %s\n", 4301 xpt_async_string(async_code)); 4302 xpt_free_path(ccb->ccb_h.path); 4303 xpt_free_ccb(ccb); 4304 return; 4305 } 4306 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4307 ccb->casync.async_arg_size = size; 4308 } else if (size < 0) { 4309 ccb->casync.async_arg_ptr = async_arg; 4310 ccb->casync.async_arg_size = size; 4311 } 4312 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4313 xpt_freeze_devq(path, 1); 4314 else 4315 xpt_freeze_simq(path->bus->sim, 1); 4316 xpt_action(ccb); 4317 } 4318 4319 static void 4320 xpt_dev_async_default(uint32_t async_code, struct cam_eb *bus, 4321 struct cam_et *target, struct cam_ed *device, 4322 void *async_arg) 4323 { 4324 4325 /* 4326 * We only need to handle events for real devices. 4327 */ 4328 if (target->target_id == CAM_TARGET_WILDCARD 4329 || device->lun_id == CAM_LUN_WILDCARD) 4330 return; 4331 4332 printf("%s called\n", __func__); 4333 } 4334 4335 static uint32_t 4336 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4337 { 4338 struct cam_devq *devq; 4339 uint32_t freeze; 4340 4341 devq = dev->sim->devq; 4342 mtx_assert(&devq->send_mtx, MA_OWNED); 4343 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4344 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4345 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4346 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4347 /* Remove frozen device from sendq.
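 * Each freeze bumps qfrozen_cnt; the device stays off the send queue
 * until xpt_release_devq() drops the count back to zero.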
*/ 4348 if (device_is_queued(dev)) 4349 camq_remove(&devq->send_queue, dev->devq_entry.index); 4350 return (freeze); 4351 } 4352 4353 uint32_t 4354 xpt_freeze_devq(struct cam_path *path, u_int count) 4355 { 4356 struct cam_ed *dev = path->device; 4357 struct cam_devq *devq; 4358 uint32_t freeze; 4359 4360 devq = dev->sim->devq; 4361 mtx_lock(&devq->send_mtx); 4362 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4363 freeze = xpt_freeze_devq_device(dev, count); 4364 mtx_unlock(&devq->send_mtx); 4365 return (freeze); 4366 } 4367 4368 uint32_t 4369 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4370 { 4371 struct cam_devq *devq; 4372 uint32_t freeze; 4373 4374 devq = sim->devq; 4375 mtx_lock(&devq->send_mtx); 4376 freeze = (devq->send_queue.qfrozen_cnt += count); 4377 mtx_unlock(&devq->send_mtx); 4378 return (freeze); 4379 } 4380 4381 static void 4382 xpt_release_devq_timeout(void *arg) 4383 { 4384 struct cam_ed *dev; 4385 struct cam_devq *devq; 4386 4387 dev = (struct cam_ed *)arg; 4388 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4389 devq = dev->sim->devq; 4390 mtx_assert(&devq->send_mtx, MA_OWNED); 4391 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4392 xpt_run_devq(devq); 4393 } 4394 4395 void 4396 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4397 { 4398 struct cam_ed *dev; 4399 struct cam_devq *devq; 4400 4401 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4402 count, run_queue)); 4403 dev = path->device; 4404 devq = dev->sim->devq; 4405 mtx_lock(&devq->send_mtx); 4406 if (xpt_release_devq_device(dev, count, run_queue)) 4407 xpt_run_devq(dev->sim->devq); 4408 mtx_unlock(&devq->send_mtx); 4409 } 4410 4411 static int 4412 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4413 { 4414 4415 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4416 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4417 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4418 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4419 if (count > dev->ccbq.queue.qfrozen_cnt) { 4420 #ifdef INVARIANTS 4421 printf("xpt_release_devq(): requested %u > present %u\n", 4422 count, dev->ccbq.queue.qfrozen_cnt); 4423 #endif 4424 count = dev->ccbq.queue.qfrozen_cnt; 4425 } 4426 dev->ccbq.queue.qfrozen_cnt -= count; 4427 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4428 /* 4429 * No longer need to wait for a successful 4430 * command completion. 4431 */ 4432 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4433 /* 4434 * Remove any timeouts that might be scheduled 4435 * to release this queue. 4436 */ 4437 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4438 callout_stop(&dev->callout); 4439 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4440 } 4441 /* 4442 * Now that we are unfrozen schedule the 4443 * device so any pending transactions are 4444 * run. 4445 */ 4446 xpt_schedule_devq(dev->sim->devq, dev); 4447 } else 4448 run_queue = 0; 4449 return (run_queue); 4450 } 4451 4452 void 4453 xpt_release_simq(struct cam_sim *sim, int run_queue) 4454 { 4455 struct cam_devq *devq; 4456 4457 devq = sim->devq; 4458 mtx_lock(&devq->send_mtx); 4459 if (devq->send_queue.qfrozen_cnt <= 0) { 4460 #ifdef INVARIANTS 4461 printf("xpt_release_simq: requested 1 > present %u\n", 4462 devq->send_queue.qfrozen_cnt); 4463 #endif 4464 } else 4465 devq->send_queue.qfrozen_cnt--; 4466 if (devq->send_queue.qfrozen_cnt == 0) { 4467 if (run_queue) { 4468 /* 4469 * Now that we are unfrozen run the send queue. 
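 * (Every xpt_freeze_simq() call must be balanced by one
 * xpt_release_simq() call before the SIM queue runs again.)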
4470 */ 4471 xpt_run_devq(sim->devq); 4472 } 4473 } 4474 mtx_unlock(&devq->send_mtx); 4475 } 4476 4477 void 4478 xpt_done(union ccb *done_ccb) 4479 { 4480 struct cam_doneq *queue; 4481 int run, hash; 4482 4483 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4484 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4485 done_ccb->csio.bio != NULL) 4486 biotrack(done_ccb->csio.bio, __func__); 4487 #endif 4488 4489 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4490 ("xpt_done: func= %#x %s status %#x\n", 4491 done_ccb->ccb_h.func_code, 4492 xpt_action_name(done_ccb->ccb_h.func_code), 4493 done_ccb->ccb_h.status)); 4494 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4495 return; 4496 4497 /* Store the time the ccb was in the sim */ 4498 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4499 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4500 hash = (u_int)(done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4501 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4502 queue = &cam_doneqs[hash]; 4503 mtx_lock(&queue->cam_doneq_mtx); 4504 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4505 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4506 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4507 mtx_unlock(&queue->cam_doneq_mtx); 4508 if (run && !dumping) 4509 wakeup(&queue->cam_doneq); 4510 } 4511 4512 void 4513 xpt_done_direct(union ccb *done_ccb) 4514 { 4515 4516 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4517 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4518 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4519 return; 4520 4521 /* Store the time the ccb was in the sim */ 4522 done_ccb->ccb_h.qos.periph_data = cam_iosched_delta_t(done_ccb->ccb_h.qos.periph_data); 4523 done_ccb->ccb_h.status |= CAM_QOS_VALID; 4524 xpt_done_process(&done_ccb->ccb_h); 4525 } 4526 4527 union ccb * 4528 xpt_alloc_ccb(void) 4529 { 4530 union ccb *new_ccb; 4531 4532 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4533 return (new_ccb); 4534 } 4535 4536 union ccb * 4537 xpt_alloc_ccb_nowait(void) 4538 { 4539 union ccb *new_ccb; 4540 4541 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4542 return (new_ccb); 4543 } 4544 4545 void 4546 xpt_free_ccb(union ccb *free_ccb) 4547 { 4548 struct cam_periph *periph; 4549 4550 if (free_ccb->ccb_h.alloc_flags & CAM_CCB_FROM_UMA) { 4551 /* 4552 * Looks like a CCB allocated from a periph UMA zone. 4553 */ 4554 periph = free_ccb->ccb_h.path->periph; 4555 uma_zfree(periph->ccb_zone, free_ccb); 4556 } else { 4557 free(free_ccb, M_CAMCCB); 4558 } 4559 } 4560 4561 /* Private XPT functions */ 4562 4563 /* 4564 * Get a CAM control block for the caller. Charge the structure to the device 4565 * referenced by the path. If we don't have sufficient resources to allocate 4566 * more ccbs, we return NULL. 
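 * CCBs come from the peripheral's private UMA zone when one has been
 * registered, otherwise from the generic M_CAMCCB malloc type; the
 * CAM_CCB_FROM_UMA flag records which, so xpt_free_ccb() can return the
 * CCB to the right place.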
4567 */ 4568 static union ccb * 4569 xpt_get_ccb_nowait(struct cam_periph *periph) 4570 { 4571 union ccb *new_ccb; 4572 int alloc_flags; 4573 4574 if (periph->ccb_zone != NULL) { 4575 alloc_flags = CAM_CCB_FROM_UMA; 4576 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_NOWAIT); 4577 } else { 4578 alloc_flags = 0; 4579 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4580 } 4581 if (new_ccb == NULL) 4582 return (NULL); 4583 new_ccb->ccb_h.alloc_flags = alloc_flags; 4584 periph->periph_allocated++; 4585 cam_ccbq_take_opening(&periph->path->device->ccbq); 4586 return (new_ccb); 4587 } 4588 4589 static union ccb * 4590 xpt_get_ccb(struct cam_periph *periph) 4591 { 4592 union ccb *new_ccb; 4593 int alloc_flags; 4594 4595 cam_periph_unlock(periph); 4596 if (periph->ccb_zone != NULL) { 4597 alloc_flags = CAM_CCB_FROM_UMA; 4598 new_ccb = uma_zalloc(periph->ccb_zone, M_ZERO|M_WAITOK); 4599 } else { 4600 alloc_flags = 0; 4601 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4602 } 4603 new_ccb->ccb_h.alloc_flags = alloc_flags; 4604 cam_periph_lock(periph); 4605 periph->periph_allocated++; 4606 cam_ccbq_take_opening(&periph->path->device->ccbq); 4607 return (new_ccb); 4608 } 4609 4610 union ccb * 4611 cam_periph_getccb(struct cam_periph *periph, uint32_t priority) 4612 { 4613 struct ccb_hdr *ccb_h; 4614 4615 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4616 cam_periph_assert(periph, MA_OWNED); 4617 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4618 ccb_h->pinfo.priority != priority) { 4619 if (priority < periph->immediate_priority) { 4620 periph->immediate_priority = priority; 4621 xpt_run_allocq(periph, 0); 4622 } else 4623 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4624 "cgticb", 0); 4625 } 4626 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4627 return ((union ccb *)ccb_h); 4628 } 4629 4630 static void 4631 xpt_acquire_bus(struct cam_eb *bus) 4632 { 4633 4634 xpt_lock_buses(); 4635 bus->refcount++; 4636 xpt_unlock_buses(); 4637 } 4638 4639 static void 4640 xpt_release_bus(struct cam_eb *bus) 4641 { 4642 4643 xpt_lock_buses(); 4644 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4645 if (--bus->refcount > 0) { 4646 xpt_unlock_buses(); 4647 return; 4648 } 4649 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4650 xsoftc.bus_generation++; 4651 xpt_unlock_buses(); 4652 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4653 ("destroying bus, but target list is not empty")); 4654 cam_sim_release(bus->sim); 4655 mtx_destroy(&bus->eb_mtx); 4656 free(bus, M_CAMXPT); 4657 } 4658 4659 static struct cam_et * 4660 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4661 { 4662 struct cam_et *cur_target, *target; 4663 4664 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4665 mtx_assert(&bus->eb_mtx, MA_OWNED); 4666 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4667 M_NOWAIT|M_ZERO); 4668 if (target == NULL) 4669 return (NULL); 4670 4671 TAILQ_INIT(&target->ed_entries); 4672 target->bus = bus; 4673 target->target_id = target_id; 4674 target->refcount = 1; 4675 target->generation = 0; 4676 target->luns = NULL; 4677 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4678 timevalclear(&target->last_reset); 4679 /* 4680 * Hold a reference to our parent bus so it 4681 * will not go away before we do. 
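 * (References flow up the topology: devices hold their target and
 * targets hold their bus, so teardown happens leaf-first.)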
4682 */ 4683 bus->refcount++; 4684 4685 /* Insertion sort into our bus's target list */ 4686 cur_target = TAILQ_FIRST(&bus->et_entries); 4687 while (cur_target != NULL && cur_target->target_id < target_id) 4688 cur_target = TAILQ_NEXT(cur_target, links); 4689 if (cur_target != NULL) { 4690 TAILQ_INSERT_BEFORE(cur_target, target, links); 4691 } else { 4692 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4693 } 4694 bus->generation++; 4695 return (target); 4696 } 4697 4698 static void 4699 xpt_acquire_target(struct cam_et *target) 4700 { 4701 struct cam_eb *bus = target->bus; 4702 4703 mtx_lock(&bus->eb_mtx); 4704 target->refcount++; 4705 mtx_unlock(&bus->eb_mtx); 4706 } 4707 4708 static void 4709 xpt_release_target(struct cam_et *target) 4710 { 4711 struct cam_eb *bus = target->bus; 4712 4713 mtx_lock(&bus->eb_mtx); 4714 if (--target->refcount > 0) { 4715 mtx_unlock(&bus->eb_mtx); 4716 return; 4717 } 4718 TAILQ_REMOVE(&bus->et_entries, target, links); 4719 bus->generation++; 4720 mtx_unlock(&bus->eb_mtx); 4721 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4722 ("destroying target, but device list is not empty")); 4723 xpt_release_bus(bus); 4724 mtx_destroy(&target->luns_mtx); 4725 if (target->luns) 4726 free(target->luns, M_CAMXPT); 4727 free(target, M_CAMXPT); 4728 } 4729 4730 static struct cam_ed * 4731 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4732 lun_id_t lun_id) 4733 { 4734 struct cam_ed *device; 4735 4736 device = xpt_alloc_device(bus, target, lun_id); 4737 if (device == NULL) 4738 return (NULL); 4739 4740 device->mintags = 1; 4741 device->maxtags = 1; 4742 return (device); 4743 } 4744 4745 static void 4746 xpt_destroy_device(void *context, int pending) 4747 { 4748 struct cam_ed *device = context; 4749 4750 mtx_lock(&device->device_mtx); 4751 mtx_destroy(&device->device_mtx); 4752 free(device, M_CAMDEV); 4753 } 4754 4755 struct cam_ed * 4756 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4757 { 4758 struct cam_ed *cur_device, *device; 4759 struct cam_devq *devq; 4760 cam_status status; 4761 4762 mtx_assert(&bus->eb_mtx, MA_OWNED); 4763 /* Make space for us in the device queue on our bus */ 4764 devq = bus->sim->devq; 4765 mtx_lock(&devq->send_mtx); 4766 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4767 mtx_unlock(&devq->send_mtx); 4768 if (status != CAM_REQ_CMP) 4769 return (NULL); 4770 4771 device = (struct cam_ed *)malloc(sizeof(*device), 4772 M_CAMDEV, M_NOWAIT|M_ZERO); 4773 if (device == NULL) 4774 return (NULL); 4775 4776 cam_init_pinfo(&device->devq_entry); 4777 device->target = target; 4778 device->lun_id = lun_id; 4779 device->sim = bus->sim; 4780 if (cam_ccbq_init(&device->ccbq, 4781 bus->sim->max_dev_openings) != 0) { 4782 free(device, M_CAMDEV); 4783 return (NULL); 4784 } 4785 SLIST_INIT(&device->asyncs); 4786 SLIST_INIT(&device->periphs); 4787 device->generation = 0; 4788 device->flags = CAM_DEV_UNCONFIGURED; 4789 device->tag_delay_count = 0; 4790 device->tag_saved_openings = 0; 4791 device->refcount = 1; 4792 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4793 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4794 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4795 /* 4796 * Hold a reference to our parent bus so it 4797 * will not go away before we do. 
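 * (Strictly, the reference taken below is on the parent target; the
 * target in turn holds a reference on its bus.)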
4798 */ 4799 target->refcount++; 4800 4801 cur_device = TAILQ_FIRST(&target->ed_entries); 4802 while (cur_device != NULL && cur_device->lun_id < lun_id) 4803 cur_device = TAILQ_NEXT(cur_device, links); 4804 if (cur_device != NULL) 4805 TAILQ_INSERT_BEFORE(cur_device, device, links); 4806 else 4807 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4808 target->generation++; 4809 return (device); 4810 } 4811 4812 void 4813 xpt_acquire_device(struct cam_ed *device) 4814 { 4815 struct cam_eb *bus = device->target->bus; 4816 4817 mtx_lock(&bus->eb_mtx); 4818 device->refcount++; 4819 mtx_unlock(&bus->eb_mtx); 4820 } 4821 4822 void 4823 xpt_release_device(struct cam_ed *device) 4824 { 4825 struct cam_eb *bus = device->target->bus; 4826 struct cam_devq *devq; 4827 4828 mtx_lock(&bus->eb_mtx); 4829 if (--device->refcount > 0) { 4830 mtx_unlock(&bus->eb_mtx); 4831 return; 4832 } 4833 4834 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4835 device->target->generation++; 4836 mtx_unlock(&bus->eb_mtx); 4837 4838 /* Release our slot in the devq */ 4839 devq = bus->sim->devq; 4840 mtx_lock(&devq->send_mtx); 4841 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4842 4843 KASSERT(SLIST_EMPTY(&device->periphs), 4844 ("destroying device, but periphs list is not empty")); 4845 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4846 ("destroying device while still queued for ccbs")); 4847 4848 /* The send_mtx must be held when accessing the callout */ 4849 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4850 callout_stop(&device->callout); 4851 4852 mtx_unlock(&devq->send_mtx); 4853 4854 xpt_release_target(device->target); 4855 4856 cam_ccbq_fini(&device->ccbq); 4857 /* 4858 * Free allocated memory. free(9) does nothing if the 4859 * supplied pointer is NULL, so it is safe to call without 4860 * checking. 
4861 */ 4862 free(device->supported_vpds, M_CAMXPT); 4863 free(device->device_id, M_CAMXPT); 4864 free(device->ext_inq, M_CAMXPT); 4865 free(device->physpath, M_CAMXPT); 4866 free(device->rcap_buf, M_CAMXPT); 4867 free(device->serial_num, M_CAMXPT); 4868 free(device->nvme_data, M_CAMXPT); 4869 free(device->nvme_cdata, M_CAMXPT); 4870 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4871 } 4872 4873 uint32_t 4874 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4875 { 4876 int result; 4877 struct cam_ed *dev; 4878 4879 dev = path->device; 4880 mtx_lock(&dev->sim->devq->send_mtx); 4881 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4882 mtx_unlock(&dev->sim->devq->send_mtx); 4883 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4884 || (dev->inq_flags & SID_CmdQue) != 0) 4885 dev->tag_saved_openings = newopenings; 4886 return (result); 4887 } 4888 4889 static struct cam_eb * 4890 xpt_find_bus(path_id_t path_id) 4891 { 4892 struct cam_eb *bus; 4893 4894 xpt_lock_buses(); 4895 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4896 bus != NULL; 4897 bus = TAILQ_NEXT(bus, links)) { 4898 if (bus->path_id == path_id) { 4899 bus->refcount++; 4900 break; 4901 } 4902 } 4903 xpt_unlock_buses(); 4904 return (bus); 4905 } 4906 4907 static struct cam_et * 4908 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4909 { 4910 struct cam_et *target; 4911 4912 mtx_assert(&bus->eb_mtx, MA_OWNED); 4913 for (target = TAILQ_FIRST(&bus->et_entries); 4914 target != NULL; 4915 target = TAILQ_NEXT(target, links)) { 4916 if (target->target_id == target_id) { 4917 target->refcount++; 4918 break; 4919 } 4920 } 4921 return (target); 4922 } 4923 4924 static struct cam_ed * 4925 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4926 { 4927 struct cam_ed *device; 4928 4929 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4930 for (device = TAILQ_FIRST(&target->ed_entries); 4931 device != NULL; 4932 device = TAILQ_NEXT(device, links)) { 4933 if (device->lun_id == lun_id) { 4934 device->refcount++; 4935 break; 4936 } 4937 } 4938 return (device); 4939 } 4940 4941 void 4942 xpt_start_tags(struct cam_path *path) 4943 { 4944 struct ccb_relsim crs; 4945 struct cam_ed *device; 4946 struct cam_sim *sim; 4947 int newopenings; 4948 4949 device = path->device; 4950 sim = path->bus->sim; 4951 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4952 xpt_freeze_devq(path, /*count*/1); 4953 device->inq_flags |= SID_CmdQue; 4954 if (device->tag_saved_openings != 0) 4955 newopenings = device->tag_saved_openings; 4956 else 4957 newopenings = min(device->maxtags, 4958 sim->max_tagged_dev_openings); 4959 xpt_dev_ccbq_resize(path, newopenings); 4960 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4961 memset(&crs, 0, sizeof(crs)); 4962 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4963 crs.ccb_h.func_code = XPT_REL_SIMQ; 4964 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4965 crs.openings 4966 = crs.release_timeout 4967 = crs.qfrozen_cnt 4968 = 0; 4969 xpt_action((union ccb *)&crs); 4970 } 4971 4972 void 4973 xpt_stop_tags(struct cam_path *path) 4974 { 4975 struct ccb_relsim crs; 4976 struct cam_ed *device; 4977 struct cam_sim *sim; 4978 4979 device = path->device; 4980 sim = path->bus->sim; 4981 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4982 device->tag_delay_count = 0; 4983 xpt_freeze_devq(path, /*count*/1); 4984 device->inq_flags &= ~SID_CmdQue; 4985 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4986 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4987 memset(&crs, 0, sizeof(crs)); 4988 xpt_setup_ccb(&crs.ccb_h, 
path, CAM_PRIORITY_NORMAL); 4989 crs.ccb_h.func_code = XPT_REL_SIMQ; 4990 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4991 crs.openings 4992 = crs.release_timeout 4993 = crs.qfrozen_cnt 4994 = 0; 4995 xpt_action((union ccb *)&crs); 4996 } 4997 4998 /* 4999 * Assume all possible buses are detected by this time, so allow boot 5000 * as soon as they all are scanned. 5001 */ 5002 static void 5003 xpt_boot_delay(void *arg) 5004 { 5005 5006 xpt_release_boot(); 5007 } 5008 5009 /* 5010 * Now that all config hooks have completed, start boot_delay timer, 5011 * waiting for possibly still undetected buses (USB) to appear. 5012 */ 5013 static void 5014 xpt_ch_done(void *arg) 5015 { 5016 5017 callout_init(&xsoftc.boot_callout, 1); 5018 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 5019 SBT_1MS, xpt_boot_delay, NULL, 0); 5020 } 5021 SYSINIT(xpt_hw_delay, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_ANY, xpt_ch_done, NULL); 5022 5023 /* 5024 * Now that interrupts are enabled, go find our devices 5025 */ 5026 static void 5027 xpt_config(void *arg) 5028 { 5029 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 5030 printf("xpt_config: failed to create taskqueue thread.\n"); 5031 5032 /* Setup debugging path */ 5033 if (cam_dflags != CAM_DEBUG_NONE) { 5034 if (xpt_create_path(&cam_dpath, NULL, 5035 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5036 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5037 printf("xpt_config: xpt_create_path() failed for debug" 5038 " target %d:%d:%d, debugging disabled\n", 5039 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5040 cam_dflags = CAM_DEBUG_NONE; 5041 } 5042 } else 5043 cam_dpath = NULL; 5044 5045 periphdriver_init(1); 5046 xpt_hold_boot(); 5047 5048 /* Fire up rescan thread. */ 5049 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5050 "cam", "scanner")) { 5051 printf("xpt_config: failed to create rescan thread.\n"); 5052 } 5053 } 5054 5055 void 5056 xpt_hold_boot_locked(void) 5057 { 5058 5059 if (xsoftc.buses_to_config++ == 0) 5060 root_mount_hold_token("CAM", &xsoftc.xpt_rootmount); 5061 } 5062 5063 void 5064 xpt_hold_boot(void) 5065 { 5066 5067 xpt_lock_buses(); 5068 xpt_hold_boot_locked(); 5069 xpt_unlock_buses(); 5070 } 5071 5072 void 5073 xpt_release_boot(void) 5074 { 5075 5076 xpt_lock_buses(); 5077 if (--xsoftc.buses_to_config == 0) { 5078 if (xsoftc.buses_config_done == 0) { 5079 xsoftc.buses_config_done = 1; 5080 xsoftc.buses_to_config++; 5081 TASK_INIT(&xsoftc.boot_task, 0, xpt_finishconfig_task, 5082 NULL); 5083 taskqueue_enqueue(taskqueue_thread, &xsoftc.boot_task); 5084 } else 5085 root_mount_rel(&xsoftc.xpt_rootmount); 5086 } 5087 xpt_unlock_buses(); 5088 } 5089 5090 /* 5091 * If the given device only has one peripheral attached to it, and if that 5092 * peripheral is the passthrough driver, announce it. This insures that the 5093 * user sees some sort of announcement for every peripheral in their system. 
5094 */ 5095 static int 5096 xptpassannouncefunc(struct cam_ed *device, void *arg) 5097 { 5098 struct cam_periph *periph; 5099 int i; 5100 5101 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5102 periph = SLIST_NEXT(periph, periph_links), i++); 5103 5104 periph = SLIST_FIRST(&device->periphs); 5105 if ((i == 1) 5106 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5107 xpt_announce_periph(periph, NULL); 5108 5109 return(1); 5110 } 5111 5112 static void 5113 xpt_finishconfig_task(void *context, int pending) 5114 { 5115 5116 periphdriver_init(2); 5117 /* 5118 * Check for devices with no "standard" peripheral driver 5119 * attached. For any devices like that, announce the 5120 * passthrough driver so the user will see something. 5121 */ 5122 if (!bootverbose) 5123 xpt_for_all_devices(xptpassannouncefunc, NULL); 5124 5125 xpt_release_boot(); 5126 } 5127 5128 cam_status 5129 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5130 struct cam_path *path) 5131 { 5132 struct ccb_setasync csa; 5133 cam_status status; 5134 bool xptpath = false; 5135 5136 if (path == NULL) { 5137 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5138 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5139 if (status != CAM_REQ_CMP) 5140 return (status); 5141 xpt_path_lock(path); 5142 xptpath = true; 5143 } 5144 5145 memset(&csa, 0, sizeof(csa)); 5146 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5147 csa.ccb_h.func_code = XPT_SASYNC_CB; 5148 csa.event_enable = event; 5149 csa.callback = cbfunc; 5150 csa.callback_arg = cbarg; 5151 xpt_action((union ccb *)&csa); 5152 status = csa.ccb_h.status; 5153 5154 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5155 ("xpt_register_async: func %p\n", cbfunc)); 5156 5157 if (xptpath) { 5158 xpt_path_unlock(path); 5159 xpt_free_path(path); 5160 } 5161 5162 if ((status == CAM_REQ_CMP) && 5163 (csa.event_enable & AC_FOUND_DEVICE)) { 5164 /* 5165 * Get this peripheral up to date with all 5166 * the currently existing devices. 5167 */ 5168 xpt_for_all_devices(xptsetasyncfunc, &csa); 5169 } 5170 if ((status == CAM_REQ_CMP) && 5171 (csa.event_enable & AC_PATH_REGISTERED)) { 5172 /* 5173 * Get this peripheral up to date with all 5174 * the currently existing buses. 5175 */ 5176 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5177 } 5178 5179 return (status); 5180 } 5181 5182 static void 5183 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5184 { 5185 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5186 5187 switch (work_ccb->ccb_h.func_code) { 5188 /* Common cases first */ 5189 case XPT_PATH_INQ: /* Path routing inquiry */ 5190 { 5191 struct ccb_pathinq *cpi; 5192 5193 cpi = &work_ccb->cpi; 5194 cpi->version_num = 1; /* XXX??? 
*/ 5195 cpi->hba_inquiry = 0; 5196 cpi->target_sprt = 0; 5197 cpi->hba_misc = 0; 5198 cpi->hba_eng_cnt = 0; 5199 cpi->max_target = 0; 5200 cpi->max_lun = 0; 5201 cpi->initiator_id = 0; 5202 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5203 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5204 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5205 cpi->unit_number = sim->unit_number; 5206 cpi->bus_id = sim->bus_id; 5207 cpi->base_transfer_speed = 0; 5208 cpi->protocol = PROTO_UNSPECIFIED; 5209 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5210 cpi->transport = XPORT_UNSPECIFIED; 5211 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5212 cpi->ccb_h.status = CAM_REQ_CMP; 5213 break; 5214 } 5215 default: 5216 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5217 break; 5218 } 5219 xpt_done(work_ccb); 5220 } 5221 5222 /* 5223 * The xpt as a "controller" has no interrupt sources, so polling 5224 * is a no-op. 5225 */ 5226 static void 5227 xptpoll(struct cam_sim *sim) 5228 { 5229 } 5230 5231 void 5232 xpt_lock_buses(void) 5233 { 5234 mtx_lock(&xsoftc.xpt_topo_lock); 5235 } 5236 5237 void 5238 xpt_unlock_buses(void) 5239 { 5240 mtx_unlock(&xsoftc.xpt_topo_lock); 5241 } 5242 5243 struct mtx * 5244 xpt_path_mtx(struct cam_path *path) 5245 { 5246 5247 return (&path->device->device_mtx); 5248 } 5249 5250 static void 5251 xpt_done_process(struct ccb_hdr *ccb_h) 5252 { 5253 struct cam_sim *sim = NULL; 5254 struct cam_devq *devq = NULL; 5255 struct mtx *mtx = NULL; 5256 5257 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5258 struct ccb_scsiio *csio; 5259 5260 if (ccb_h->func_code == XPT_SCSI_IO) { 5261 csio = &((union ccb *)ccb_h)->csio; 5262 if (csio->bio != NULL) 5263 biotrack(csio->bio, __func__); 5264 } 5265 #endif 5266 5267 if (ccb_h->flags & CAM_HIGH_POWER) { 5268 struct highpowerlist *hphead; 5269 struct cam_ed *device; 5270 5271 mtx_lock(&xsoftc.xpt_highpower_lock); 5272 hphead = &xsoftc.highpowerq; 5273 5274 device = STAILQ_FIRST(hphead); 5275 5276 /* 5277 * Increment the count since this command is done. 5278 */ 5279 xsoftc.num_highpower++; 5280 5281 /* 5282 * Any high powered commands queued up? 5283 */ 5284 if (device != NULL) { 5285 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5286 mtx_unlock(&xsoftc.xpt_highpower_lock); 5287 5288 mtx_lock(&device->sim->devq->send_mtx); 5289 xpt_release_devq_device(device, 5290 /*count*/1, /*runqueue*/TRUE); 5291 mtx_unlock(&device->sim->devq->send_mtx); 5292 } else 5293 mtx_unlock(&xsoftc.xpt_highpower_lock); 5294 } 5295 5296 /* 5297 * Insulate against a race where the periph is destroyed but CCBs are 5298 * still not all processed. This shouldn't happen, but allows us better 5299 * bug diagnostic when it does. 
5300 */ 5301 if (ccb_h->path->bus) 5302 sim = ccb_h->path->bus->sim; 5303 5304 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5305 KASSERT(sim, ("sim missing for CAM_RELEASE_SIMQ request")); 5306 xpt_release_simq(sim, /*run_queue*/FALSE); 5307 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5308 } 5309 5310 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5311 && (ccb_h->status & CAM_DEV_QFRZN)) { 5312 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5313 ccb_h->status &= ~CAM_DEV_QFRZN; 5314 } 5315 5316 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5317 struct cam_ed *dev = ccb_h->path->device; 5318 5319 if (sim) 5320 devq = sim->devq; 5321 KASSERT(devq, ("Periph disappeared with CCB %p %s request pending.", 5322 ccb_h, xpt_action_name(ccb_h->func_code))); 5323 5324 mtx_lock(&devq->send_mtx); 5325 devq->send_active--; 5326 devq->send_openings++; 5327 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5328 5329 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5330 && (dev->ccbq.dev_active == 0))) { 5331 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5332 xpt_release_devq_device(dev, /*count*/1, 5333 /*run_queue*/FALSE); 5334 } 5335 5336 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5337 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5338 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5339 xpt_release_devq_device(dev, /*count*/1, 5340 /*run_queue*/FALSE); 5341 } 5342 5343 if (!device_is_queued(dev)) 5344 (void)xpt_schedule_devq(devq, dev); 5345 xpt_run_devq(devq); 5346 mtx_unlock(&devq->send_mtx); 5347 5348 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5349 mtx = xpt_path_mtx(ccb_h->path); 5350 mtx_lock(mtx); 5351 5352 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5353 && (--dev->tag_delay_count == 0)) 5354 xpt_start_tags(ccb_h->path); 5355 } 5356 } 5357 5358 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5359 if (mtx == NULL) { 5360 mtx = xpt_path_mtx(ccb_h->path); 5361 mtx_lock(mtx); 5362 } 5363 } else { 5364 if (mtx != NULL) { 5365 mtx_unlock(mtx); 5366 mtx = NULL; 5367 } 5368 } 5369 5370 /* Call the peripheral driver's callback */ 5371 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5372 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5373 if (mtx != NULL) 5374 mtx_unlock(mtx); 5375 } 5376 5377 /* 5378 * Parameterize instead and use xpt_done_td? 
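 * xpt_async_td() below is nearly identical to xpt_done_td(), but it
 * serves its own completion queue (used for async events) and may sleep
 * while processing, which is why the two have not been merged yet.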
5379 */ 5380 static void 5381 xpt_async_td(void *arg) 5382 { 5383 struct cam_doneq *queue = arg; 5384 struct ccb_hdr *ccb_h; 5385 STAILQ_HEAD(, ccb_hdr) doneq; 5386 5387 STAILQ_INIT(&doneq); 5388 mtx_lock(&queue->cam_doneq_mtx); 5389 while (1) { 5390 while (STAILQ_EMPTY(&queue->cam_doneq)) 5391 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5392 PRIBIO, "-", 0); 5393 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5394 mtx_unlock(&queue->cam_doneq_mtx); 5395 5396 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5397 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5398 xpt_done_process(ccb_h); 5399 } 5400 5401 mtx_lock(&queue->cam_doneq_mtx); 5402 } 5403 } 5404 5405 void 5406 xpt_done_td(void *arg) 5407 { 5408 struct cam_doneq *queue = arg; 5409 struct ccb_hdr *ccb_h; 5410 STAILQ_HEAD(, ccb_hdr) doneq; 5411 5412 STAILQ_INIT(&doneq); 5413 mtx_lock(&queue->cam_doneq_mtx); 5414 while (1) { 5415 while (STAILQ_EMPTY(&queue->cam_doneq)) { 5416 queue->cam_doneq_sleep = 1; 5417 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5418 PRIBIO, "-", 0); 5419 queue->cam_doneq_sleep = 0; 5420 } 5421 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5422 mtx_unlock(&queue->cam_doneq_mtx); 5423 5424 THREAD_NO_SLEEPING(); 5425 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5426 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5427 xpt_done_process(ccb_h); 5428 } 5429 THREAD_SLEEPING_OK(); 5430 5431 mtx_lock(&queue->cam_doneq_mtx); 5432 } 5433 } 5434 5435 static void 5436 camisr_runqueue(void) 5437 { 5438 struct ccb_hdr *ccb_h; 5439 struct cam_doneq *queue; 5440 int i; 5441 5442 /* Process global queues. */ 5443 for (i = 0; i < cam_num_doneqs; i++) { 5444 queue = &cam_doneqs[i]; 5445 mtx_lock(&queue->cam_doneq_mtx); 5446 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5447 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5448 mtx_unlock(&queue->cam_doneq_mtx); 5449 xpt_done_process(ccb_h); 5450 mtx_lock(&queue->cam_doneq_mtx); 5451 } 5452 mtx_unlock(&queue->cam_doneq_mtx); 5453 } 5454 } 5455 5456 /** 5457 * @brief Return the device_t associated with the path 5458 * 5459 * When a SIM is created, it registers a bus with a NEWBUS device_t. This is 5460 * stored in the internal cam_eb bus structure. There is no guarantee any given 5461 * path will have a @c device_t associated with it (it's legal to call @c 5462 * xpt_bus_register with a @c NULL @c device_t). 5463 * 5464 * @param path Path to return the device_t for.
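 *
 * @return The device_t registered for the path's bus, or NULL if none
 *	    was provided at xpt_bus_register() time.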
5465 */ 5466 device_t 5467 xpt_path_sim_device(const struct cam_path *path) 5468 { 5469 return (path->bus->parent_dev); 5470 } 5471 5472 struct kv 5473 { 5474 uint32_t v; 5475 const char *name; 5476 }; 5477 5478 static struct kv map[] = { 5479 { XPT_NOOP, "XPT_NOOP" }, 5480 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5481 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5482 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5483 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5484 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5485 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5486 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5487 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5488 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5489 { XPT_DEBUG, "XPT_DEBUG" }, 5490 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5491 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5492 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5493 { XPT_ASYNC, "XPT_ASYNC" }, 5494 { XPT_ABORT, "XPT_ABORT" }, 5495 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5496 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5497 { XPT_TERM_IO, "XPT_TERM_IO" }, 5498 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5499 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5500 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5501 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5502 { XPT_ATA_IO, "XPT_ATA_IO" }, 5503 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5504 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5505 { XPT_NVME_IO, "XPT_NVME_IO" }, 5506 { XPT_MMC_IO, "XPT_MMC_IO" }, 5507 { XPT_SMP_IO, "XPT_SMP_IO" }, 5508 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5509 { XPT_NVME_ADMIN, "XPT_NVME_ADMIN" }, 5510 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5511 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5512 { XPT_EN_LUN, "XPT_EN_LUN" }, 5513 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5514 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5515 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5516 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5517 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5518 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5519 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5520 { 0, 0 } 5521 }; 5522 5523 const char * 5524 xpt_action_name(uint32_t action) 5525 { 5526 static char buffer[32]; /* Only for unknown messages -- racy */ 5527 struct kv *walker = map; 5528 5529 while (walker->name != NULL) { 5530 if (walker->v == action) 5531 return (walker->name); 5532 walker++; 5533 } 5534 5535 snprintf(buffer, sizeof(buffer), "%#x", action); 5536 return (buffer); 5537 } 5538 5539 void 5540 xpt_cam_path_debug(struct cam_path *path, const char *fmt, ...) 5541 { 5542 struct sbuf sbuf; 5543 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5544 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5545 va_list ap; 5546 5547 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5548 xpt_path_sbuf(path, sb); 5549 va_start(ap, fmt); 5550 sbuf_vprintf(sb, fmt, ap); 5551 va_end(ap); 5552 sbuf_finish(sb); 5553 sbuf_delete(sb); 5554 if (cam_debug_delay != 0) 5555 DELAY(cam_debug_delay); 5556 } 5557 5558 void 5559 xpt_cam_dev_debug(struct cam_ed *dev, const char *fmt, ...) 5560 { 5561 struct sbuf sbuf; 5562 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5563 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5564 va_list ap; 5565 5566 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5567 xpt_device_sbuf(dev, sb); 5568 va_start(ap, fmt); 5569 sbuf_vprintf(sb, fmt, ap); 5570 va_end(ap); 5571 sbuf_finish(sb); 5572 sbuf_delete(sb); 5573 if (cam_debug_delay != 0) 5574 DELAY(cam_debug_delay); 5575 } 5576 5577 void 5578 xpt_cam_debug(const char *fmt, ...) 
5579 { 5580 struct sbuf sbuf; 5581 char buf[XPT_PRINT_LEN]; /* balance to not eat too much stack */ 5582 struct sbuf *sb = sbuf_new(&sbuf, buf, sizeof(buf), SBUF_FIXEDLEN); 5583 va_list ap; 5584 5585 sbuf_set_drain(sb, sbuf_printf_drain, NULL); 5586 sbuf_cat(sb, "cam_debug: "); 5587 va_start(ap, fmt); 5588 sbuf_vprintf(sb, fmt, ap); 5589 va_end(ap); 5590 sbuf_finish(sb); 5591 sbuf_delete(sb); 5592 if (cam_debug_delay != 0) 5593 DELAY(cam_debug_delay); 5594 } 5595