/*-
 * Implementation of the Common Access Method Transport (XPT) layer.
 *
 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/kthread.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_queue.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_debug.h>
#include <cam/cam_compat.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

#include <machine/md_var.h>	/* geometry translation */
#include <machine/stdarg.h>	/* for xpt_print below */

#include "opt_cam.h"

/*
 * This is the maximum number of high powered commands (e.g. start unit)
 * that can be outstanding at a particular time.
 */
#ifndef CAM_MAX_HIGHPOWER
#define CAM_MAX_HIGHPOWER  4
#endif

/* Data structures internal to the xpt layer */
MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices");
MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs");
MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths");

/* Object for deferring XPT actions to a taskqueue */
struct xpt_task {
	struct task	task;
	void		*data1;
	uintptr_t	data2;
};

struct xpt_softc {
	uint32_t		xpt_generation;

	/* number of high powered commands that can go through right now */
	struct mtx		xpt_highpower_lock;
	STAILQ_HEAD(highpowerlist, cam_ed)	highpowerq;
	int			num_highpower;

	/* queue for handling async rescan requests. */
	TAILQ_HEAD(, ccb_hdr)	ccb_scanq;
	int			buses_to_config;
	int			buses_config_done;

	/*
	 * Registered buses
	 *
	 * N.B., "busses" is an archaic spelling of "buses".  In new code
	 * "buses" is preferred.
	 */
	TAILQ_HEAD(,cam_eb)	xpt_busses;
	u_int			bus_generation;

	struct intr_config_hook	*xpt_config_hook;

	int			boot_delay;
	struct callout		boot_callout;

	struct mtx		xpt_topo_lock;
	struct mtx		xpt_lock;
	struct taskqueue	*xpt_taskq;
};

typedef enum {
	DM_RET_COPY		= 0x01,
	DM_RET_FLAG_MASK	= 0x0f,
	DM_RET_NONE		= 0x00,
	DM_RET_STOP		= 0x10,
	DM_RET_DESCEND		= 0x20,
	DM_RET_ERROR		= 0x30,
	DM_RET_ACTION_MASK	= 0xf0
} dev_match_ret;

typedef enum {
	XPT_DEPTH_BUS,
	XPT_DEPTH_TARGET,
	XPT_DEPTH_DEVICE,
	XPT_DEPTH_PERIPH
} xpt_traverse_depth;

struct xpt_traverse_config {
	xpt_traverse_depth	depth;
	void			*tr_func;
	void			*tr_arg;
};

typedef	int	xpt_busfunc_t (struct cam_eb *bus, void *arg);
typedef	int	xpt_targetfunc_t (struct cam_et *target, void *arg);
typedef	int	xpt_devicefunc_t (struct cam_ed *device, void *arg);
typedef	int	xpt_periphfunc_t (struct cam_periph *periph, void *arg);
typedef	int	xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);

/* Transport layer configuration information */
static struct xpt_softc xsoftc;

MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF);

SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN,
	   &xsoftc.boot_delay, 0, "Bus registration wait time");
SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD,
	    &xsoftc.xpt_generation, 0, "CAM peripheral generation count");

struct cam_doneq {
	struct mtx_padalign	cam_doneq_mtx;
	STAILQ_HEAD(, ccb_hdr)	cam_doneq;
	int			cam_doneq_sleep;
};

static struct cam_doneq cam_doneqs[MAXCPU];
static int cam_num_doneqs;
static struct proc *cam_proc;

SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN,
	   &cam_num_doneqs, 0, "Number of completion queues/threads");

struct cam_periph *xpt_periph;

static periph_init_t xpt_periph_init;

static struct periph_driver xpt_driver =
{
	xpt_periph_init, "xpt",
	TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0,
	CAM_PERIPH_DRV_EARLY
};

PERIPHDRIVER_DECLARE(xpt, xpt_driver);

static d_open_t xptopen;
static d_close_t xptclose;
static d_ioctl_t xptioctl;
static d_ioctl_t xptdoioctl;

static struct cdevsw xpt_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	xptopen,
	.d_close =	xptclose,
	.d_ioctl =	xptioctl,
	.d_name =	"xpt",
};
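
/*
 * Example (illustrative only, not part of the kernel build): xpt_cdevsw
 * above backs the /dev/xpt0 control node, which userland tools such as
 * camcontrol(8) open read-write and drive entirely through ioctls.  A
 * hand-rolled caller might issue an XPT_PATH_INQ roughly as follows,
 * although real code should use cam_open_device()/cam_getccb() from
 * libcam(3) instead:
 *
 *	union ccb ccb;
 *	int fd = open("/dev/xpt0", O_RDWR);
 *
 *	bzero(&ccb, sizeof(ccb));
 *	ccb.ccb_h.func_code = XPT_PATH_INQ;
 *	ccb.ccb_h.path_id = 0;
 *	ccb.ccb_h.target_id = CAM_TARGET_WILDCARD;
 *	ccb.ccb_h.target_lun = CAM_LUN_WILDCARD;
 *	if (fd == -1 || ioctl(fd, CAMIOCOMMAND, &ccb) == -1)
 *		err(1, "CAMIOCOMMAND");
 */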

/* Storage for debugging data structures */
struct cam_path *cam_dpath;
u_int32_t cam_dflags = CAM_DEBUG_FLAGS;
SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN,
	&cam_dflags, 0, "Enabled debug flags");
u_int32_t cam_debug_delay = CAM_DEBUG_DELAY;
SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN,
	&cam_debug_delay, 0, "Delay in us after each debug message");

/* Our boot-time initialization hook */
static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);

static moduledata_t cam_moduledata = {
	"cam",
	cam_module_event_handler,
	NULL
};

static int	xpt_init(void *);

DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(cam, 1);

static void		xpt_async_bcast(struct async_list *async_head,
					u_int32_t async_code,
					struct cam_path *path,
					void *async_arg);
static path_id_t xptnextfreepathid(void);
static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
static union ccb *xpt_get_ccb(struct cam_periph *periph);
static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph);
static void	 xpt_run_allocq(struct cam_periph *periph, int sleep);
static void	 xpt_run_allocq_task(void *context, int pending);
static void	 xpt_run_devq(struct cam_devq *devq);
static timeout_t xpt_release_devq_timeout;
static void	 xpt_release_simq_timeout(void *arg) __unused;
static void	 xpt_acquire_bus(struct cam_eb *bus);
static void	 xpt_release_bus(struct cam_eb *bus);
static uint32_t	 xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
static int	 xpt_release_devq_device(struct cam_ed *dev, u_int count,
		    int run_queue);
static struct cam_et*
		 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
static void	 xpt_acquire_target(struct cam_et *target);
static void	 xpt_release_target(struct cam_et *target);
static struct cam_eb*
		 xpt_find_bus(path_id_t path_id);
static struct cam_et*
		 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
static struct cam_ed*
		 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
static void	 xpt_config(void *arg);
static int	 xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
				  u_int32_t new_priority);
static xpt_devicefunc_t xptpassannouncefunc;
static void	 xptaction(struct cam_sim *sim, union ccb *work_ccb);
static void	 xptpoll(struct cam_sim *sim);
static void	 camisr_runqueue(void);
static void	 xpt_done_process(struct ccb_hdr *ccb_h);
static void	 xpt_done_td(void *);
static dev_match_ret	xptbusmatch(struct dev_match_pattern *patterns,
				    u_int num_patterns, struct cam_eb *bus);
static dev_match_ret	xptdevicematch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_ed *device);
static dev_match_ret	xptperiphmatch(struct dev_match_pattern *patterns,
				       u_int num_patterns,
				       struct cam_periph *periph);
static xpt_busfunc_t	xptedtbusfunc;
static xpt_targetfunc_t	xptedttargetfunc;
static xpt_devicefunc_t	xptedtdevicefunc;
static xpt_periphfunc_t	xptedtperiphfunc;
static xpt_pdrvfunc_t	xptplistpdrvfunc;
static xpt_periphfunc_t	xptplistperiphfunc;
static int		xptedtmatch(struct ccb_dev_match *cdm);
static int		xptperiphlistmatch(struct ccb_dev_match *cdm);
static int		xptbustraverse(struct cam_eb *start_bus,
				       xpt_busfunc_t *tr_func, void *arg);
static int		xpttargettraverse(struct cam_eb *bus,
					  struct cam_et *start_target,
					  xpt_targetfunc_t *tr_func, void *arg);
static int		xptdevicetraverse(struct cam_et *target,
					  struct cam_ed *start_device,
					  xpt_devicefunc_t *tr_func, void *arg);
static int		xptperiphtraverse(struct cam_ed *device,
					  struct cam_periph *start_periph,
					  xpt_periphfunc_t *tr_func, void *arg);
static int		xptpdrvtraverse(struct periph_driver **start_pdrv,
					xpt_pdrvfunc_t *tr_func, void *arg);
static int		xptpdperiphtraverse(struct periph_driver **pdrv,
					    struct cam_periph *start_periph,
					    xpt_periphfunc_t *tr_func,
					    void *arg);
static xpt_busfunc_t	xptdefbusfunc;
static xpt_targetfunc_t	xptdeftargetfunc;
static xpt_devicefunc_t	xptdefdevicefunc;
static xpt_periphfunc_t	xptdefperiphfunc;
static void		xpt_finishconfig_task(void *context, int pending);
static void		xpt_dev_async_default(u_int32_t async_code,
					      struct cam_eb *bus,
					      struct cam_et *target,
					      struct cam_ed *device,
					      void *async_arg);
static struct cam_ed *	xpt_alloc_device_default(struct cam_eb *bus,
						 struct cam_et *target,
						 lun_id_t lun_id);
static xpt_devicefunc_t	xptsetasyncfunc;
static xpt_busfunc_t	xptsetasyncbusfunc;
static cam_status	xptregister(struct cam_periph *periph,
				    void *arg);
static const char *	xpt_action_name(uint32_t action);
static __inline int device_is_queued(struct cam_ed *device);

static __inline int
xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev)
{
	int	retval;

	mtx_assert(&devq->send_mtx, MA_OWNED);
	if ((dev->ccbq.queue.entries > 0) &&
	    (dev->ccbq.dev_openings > 0) &&
	    (dev->ccbq.queue.qfrozen_cnt == 0)) {
		/*
		 * The priority of a device waiting for controller
		 * resources is that of the highest priority CCB
		 * enqueued.
		 */
		retval =
		    xpt_schedule_dev(&devq->send_queue,
				     &dev->devq_entry,
				     CAMQ_GET_PRIO(&dev->ccbq.queue));
	} else {
		retval = 0;
	}
	return (retval);
}

static __inline int
device_is_queued(struct cam_ed *device)
{
	return (device->devq_entry.index != CAM_UNQUEUED_INDEX);
}

static void
xpt_periph_init()
{
	make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
}

static int
xptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{

	/*
	 * Only allow read-write access.
	 */
	if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0))
		return(EPERM);

	/*
	 * We don't allow nonblocking access.
	 */
	if ((flags & O_NONBLOCK) != 0) {
		printf("%s: can't do nonblocking access\n", devtoname(dev));
		return(ENODEV);
	}

	return(0);
}

static int
xptclose(struct cdev *dev, int flag, int fmt, struct thread *td)
{

	return(0);
}

/*
 * Don't automatically grab the xpt softc lock here even though this is going
 * through the xpt device.  The xpt device is really just a back door for
 * accessing other devices and SIMs, so the right thing to do is to grab
 * the appropriate SIM lock once the bus/SIM is located.
 */
static int
xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) {
		error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl);
	}
	return (error);
}

static int
xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	int error;

	error = 0;

	switch(cmd) {
	/*
	 * For the transport layer CAMIOCOMMAND ioctl, we really only want
	 * to accept CCB types that don't quite make sense to send through a
	 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated
	 * in the CAM spec.
	 */
	case CAMIOCOMMAND: {
		union ccb *ccb;
		union ccb *inccb;
		struct cam_eb *bus;

		inccb = (union ccb *)addr;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (inccb->ccb_h.func_code == XPT_SCSI_IO)
			inccb->csio.bio = NULL;
#endif

		bus = xpt_find_bus(inccb->ccb_h.path_id);
		if (bus == NULL)
			return (EINVAL);

		switch (inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
			if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		case XPT_SCAN_TGT:
			if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD ||
			    inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) {
				xpt_release_bus(bus);
				return (EINVAL);
			}
			break;
		default:
			break;
		}

		switch(inccb->ccb_h.func_code) {
		case XPT_SCAN_BUS:
		case XPT_RESET_BUS:
		case XPT_PATH_INQ:
		case XPT_ENG_INQ:
		case XPT_SCAN_LUN:
		case XPT_SCAN_TGT:

			ccb = xpt_alloc_ccb();

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb->ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				xpt_free_ccb(ccb);
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(ccb, inccb);
			xpt_path_lock(ccb->ccb_h.path);
			cam_periph_runccb(ccb, NULL, 0, 0, NULL);
			xpt_path_unlock(ccb->ccb_h.path);
			bcopy(ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb->ccb_h.path);
			xpt_free_ccb(ccb);
			break;

		case XPT_DEBUG: {
			union ccb ccb;

			/*
			 * This is an immediate CCB, so it's okay to
			 * allocate it on the stack.
			 */

			/*
			 * Create a path using the bus, target, and lun the
			 * user passed in.
			 */
			if (xpt_create_path(&ccb.ccb_h.path, NULL,
					    inccb->ccb_h.path_id,
					    inccb->ccb_h.target_id,
					    inccb->ccb_h.target_lun) !=
					    CAM_REQ_CMP) {
				error = EINVAL;
				break;
			}
			/* Ensure all of our fields are correct */
			xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
				      inccb->ccb_h.pinfo.priority);
			xpt_merge_ccb(&ccb, inccb);
			xpt_action(&ccb);
			bcopy(&ccb, inccb, sizeof(union ccb));
			xpt_free_path(ccb.ccb_h.path);
			break;

		}
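		/*
		 * XPT_DEV_MATCH is the CCB userland uses to walk the EDT;
		 * camcontrol(8)'s "devlist" is built on it.  Illustrative
		 * setup only ("buf"/"bufsize" stand for hypothetical
		 * caller-supplied result storage):
		 *
		 *	ccb.ccb_h.func_code = XPT_DEV_MATCH;
		 *	ccb.cdm.match_buf_len = bufsize;
		 *	ccb.cdm.matches = (struct dev_match_result *)buf;
		 *	ccb.cdm.num_matches = 0;
		 *	ccb.cdm.num_patterns = 0;
		 *	ccb.cdm.pattern_buf_len = 0;
		 *
		 * An empty pattern list matches every node, and the caller
		 * re-issues the ioctl while cdm.status == CAM_DEV_MATCH_MORE.
		 */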
		case XPT_DEV_MATCH: {
			struct cam_periph_map_info mapinfo;
			struct cam_path *old_path;

			/*
			 * We can't deal with physical addresses for this
			 * type of transaction.
			 */
			if ((inccb->ccb_h.flags & CAM_DATA_MASK) !=
			    CAM_DATA_VADDR) {
				error = EINVAL;
				break;
			}

			/*
			 * Save this in case the caller had it set to
			 * something in particular.
			 */
			old_path = inccb->ccb_h.path;

			/*
			 * We really don't need a path for the matching
			 * code.  The path is needed because of the
			 * debugging statements in xpt_action().  They
			 * assume that the CCB has a valid path.
			 */
			inccb->ccb_h.path = xpt_periph->path;

			bzero(&mapinfo, sizeof(mapinfo));

			/*
			 * Map the pattern and match buffers into kernel
			 * virtual address space.
			 */
			error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS);

			if (error) {
				inccb->ccb_h.path = old_path;
				break;
			}

			/*
			 * This is an immediate CCB, we can send it on directly.
			 */
			xpt_action(inccb);

			/*
			 * Map the buffers back into user space.
			 */
			cam_periph_unmapmem(inccb, &mapinfo);

			inccb->ccb_h.path = old_path;

			error = 0;
			break;
		}
		default:
			error = ENOTSUP;
			break;
		}
		xpt_release_bus(bus);
		break;
	}
	/*
	 * This is the getpassthru ioctl.  It takes an XPT_GDEVLIST ccb as
	 * input, with the peripheral driver name and unit name filled in.
	 * The other fields don't really matter as input.  The passthrough
	 * driver name ("pass") and unit number are passed back in the ccb.
	 * The current device generation number, the index into the device
	 * peripheral driver list, and the status are also passed back.
	 * Note that since we do everything in one pass, unlike the
	 * XPT_GDEVLIST ccb, we never return a status of
	 * CAM_GDEVLIST_LIST_CHANGED.  It is (or rather should be)
	 * impossible for the device peripheral driver list to change since
	 * we look at the whole thing in one pass, and we do it with lock
	 * protection.
	 */
	case CAMGETPASSTHRU: {
		union ccb *ccb;
		struct cam_periph *periph;
		struct periph_driver **p_drv;
		char *name;
		u_int unit;
		int base_periph_found;

		ccb = (union ccb *)addr;
		unit = ccb->cgdl.unit_number;
		name = ccb->cgdl.periph_name;
		base_periph_found = 0;
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.bio = NULL;
#endif

		/*
		 * Sanity check -- make sure we don't get a null peripheral
		 * driver name.
		 */
		if (*ccb->cgdl.periph_name == '\0') {
			error = EINVAL;
			break;
		}

		/* Keep the list from changing while we traverse it */
		xpt_lock_buses();

		/* first find our driver in the list of drivers */
		for (p_drv = periph_drivers; *p_drv != NULL; p_drv++)
			if (strcmp((*p_drv)->driver_name, name) == 0)
				break;

		if (*p_drv == NULL) {
			xpt_unlock_buses();
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
			ccb->cgdl.status = CAM_GDEVLIST_ERROR;
			*ccb->cgdl.periph_name = '\0';
			ccb->cgdl.unit_number = 0;
			error = ENOENT;
			break;
		}

		/*
		 * Run through every peripheral instance of this driver
		 * and check to see whether it matches the unit passed
		 * in by the user.  If it does, get out of the loops and
		 * find the passthrough driver associated with that
		 * peripheral driver.
643 */ 644 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 645 periph = TAILQ_NEXT(periph, unit_links)) { 646 647 if (periph->unit_number == unit) 648 break; 649 } 650 /* 651 * If we found the peripheral driver that the user passed 652 * in, go through all of the peripheral drivers for that 653 * particular device and look for a passthrough driver. 654 */ 655 if (periph != NULL) { 656 struct cam_ed *device; 657 int i; 658 659 base_periph_found = 1; 660 device = periph->path->device; 661 for (i = 0, periph = SLIST_FIRST(&device->periphs); 662 periph != NULL; 663 periph = SLIST_NEXT(periph, periph_links), i++) { 664 /* 665 * Check to see whether we have a 666 * passthrough device or not. 667 */ 668 if (strcmp(periph->periph_name, "pass") == 0) { 669 /* 670 * Fill in the getdevlist fields. 671 */ 672 strcpy(ccb->cgdl.periph_name, 673 periph->periph_name); 674 ccb->cgdl.unit_number = 675 periph->unit_number; 676 if (SLIST_NEXT(periph, periph_links)) 677 ccb->cgdl.status = 678 CAM_GDEVLIST_MORE_DEVS; 679 else 680 ccb->cgdl.status = 681 CAM_GDEVLIST_LAST_DEVICE; 682 ccb->cgdl.generation = 683 device->generation; 684 ccb->cgdl.index = i; 685 /* 686 * Fill in some CCB header fields 687 * that the user may want. 688 */ 689 ccb->ccb_h.path_id = 690 periph->path->bus->path_id; 691 ccb->ccb_h.target_id = 692 periph->path->target->target_id; 693 ccb->ccb_h.target_lun = 694 periph->path->device->lun_id; 695 ccb->ccb_h.status = CAM_REQ_CMP; 696 break; 697 } 698 } 699 } 700 701 /* 702 * If the periph is null here, one of two things has 703 * happened. The first possibility is that we couldn't 704 * find the unit number of the particular peripheral driver 705 * that the user is asking about. e.g. the user asks for 706 * the passthrough driver for "da11". We find the list of 707 * "da" peripherals all right, but there is no unit 11. 708 * The other possibility is that we went through the list 709 * of peripheral drivers attached to the device structure, 710 * but didn't find one with the name "pass". Either way, 711 * we return ENOENT, since we couldn't find something. 712 */ 713 if (periph == NULL) { 714 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 715 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 716 *ccb->cgdl.periph_name = '\0'; 717 ccb->cgdl.unit_number = 0; 718 error = ENOENT; 719 /* 720 * It is unfortunate that this is even necessary, 721 * but there are many, many clueless users out there. 722 * If this is true, the user is looking for the 723 * passthrough driver, but doesn't have one in his 724 * kernel. 
725 */ 726 if (base_periph_found == 1) { 727 printf("xptioctl: pass driver is not in the " 728 "kernel\n"); 729 printf("xptioctl: put \"device pass\" in " 730 "your kernel config file\n"); 731 } 732 } 733 xpt_unlock_buses(); 734 break; 735 } 736 default: 737 error = ENOTTY; 738 break; 739 } 740 741 return(error); 742 } 743 744 static int 745 cam_module_event_handler(module_t mod, int what, void *arg) 746 { 747 int error; 748 749 switch (what) { 750 case MOD_LOAD: 751 if ((error = xpt_init(NULL)) != 0) 752 return (error); 753 break; 754 case MOD_UNLOAD: 755 return EBUSY; 756 default: 757 return EOPNOTSUPP; 758 } 759 760 return 0; 761 } 762 763 static struct xpt_proto * 764 xpt_proto_find(cam_proto proto) 765 { 766 struct xpt_proto **pp; 767 768 SET_FOREACH(pp, cam_xpt_proto_set) { 769 if ((*pp)->proto == proto) 770 return *pp; 771 } 772 773 return NULL; 774 } 775 776 static void 777 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 778 { 779 780 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 781 xpt_free_path(done_ccb->ccb_h.path); 782 xpt_free_ccb(done_ccb); 783 } else { 784 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 785 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 786 } 787 xpt_release_boot(); 788 } 789 790 /* thread to handle bus rescans */ 791 static void 792 xpt_scanner_thread(void *dummy) 793 { 794 union ccb *ccb; 795 struct cam_path path; 796 797 xpt_lock_buses(); 798 for (;;) { 799 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 800 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 801 "-", 0); 802 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 803 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 804 xpt_unlock_buses(); 805 806 /* 807 * Since lock can be dropped inside and path freed 808 * by completion callback even before return here, 809 * take our own path copy for reference. 810 */ 811 xpt_copy_path(&path, ccb->ccb_h.path); 812 xpt_path_lock(&path); 813 xpt_action(ccb); 814 xpt_path_unlock(&path); 815 xpt_release_path(&path); 816 817 xpt_lock_buses(); 818 } 819 } 820 } 821 822 void 823 xpt_rescan(union ccb *ccb) 824 { 825 struct ccb_hdr *hdr; 826 827 /* Prepare request */ 828 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 829 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 830 ccb->ccb_h.func_code = XPT_SCAN_BUS; 831 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 832 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 833 ccb->ccb_h.func_code = XPT_SCAN_TGT; 834 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 835 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 836 ccb->ccb_h.func_code = XPT_SCAN_LUN; 837 else { 838 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 839 xpt_free_path(ccb->ccb_h.path); 840 xpt_free_ccb(ccb); 841 return; 842 } 843 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 844 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 845 xpt_action_name(ccb->ccb_h.func_code))); 846 847 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 848 ccb->ccb_h.cbfcnp = xpt_rescan_done; 849 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 850 /* Don't make duplicate entries for the same paths. 

void
xpt_rescan(union ccb *ccb)
{
	struct ccb_hdr *hdr;

	/* Prepare request */
	if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_TGT;
	else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD &&
	    ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_LUN;
	else {
		xpt_print(ccb->ccb_h.path, "illegal scan path\n");
		xpt_free_path(ccb->ccb_h.path);
		xpt_free_ccb(ccb);
		return;
	}
	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code,
		xpt_action_name(ccb->ccb_h.func_code)));

	ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp;
	ccb->ccb_h.cbfcnp = xpt_rescan_done;
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT);
	/* Don't make duplicate entries for the same paths. */
	xpt_lock_buses();
	if (ccb->ccb_h.ppriv_ptr1 == NULL) {
		TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) {
			if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) {
				wakeup(&xsoftc.ccb_scanq);
				xpt_unlock_buses();
				xpt_print(ccb->ccb_h.path, "rescan already queued\n");
				xpt_free_path(ccb->ccb_h.path);
				xpt_free_ccb(ccb);
				return;
			}
		}
	}
	TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
	xsoftc.buses_to_config++;
	wakeup(&xsoftc.ccb_scanq);
	xpt_unlock_buses();
}

/* Functions accessed by the peripheral drivers */
static int
xpt_init(void *dummy)
{
	struct cam_sim *xpt_sim;
	struct cam_path *path;
	struct cam_devq *devq;
	cam_status status;
	int error, i;

	TAILQ_INIT(&xsoftc.xpt_busses);
	TAILQ_INIT(&xsoftc.ccb_scanq);
	STAILQ_INIT(&xsoftc.highpowerq);
	xsoftc.num_highpower = CAM_MAX_HIGHPOWER;

	mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF);
	mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF);
	xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq);

#ifdef CAM_BOOT_DELAY
	/*
	 * Override this value at compile time to assist our users
	 * who don't use loader to boot a kernel.
	 */
	xsoftc.boot_delay = CAM_BOOT_DELAY;
#endif
	/*
	 * The xpt layer is, itself, the equivalent of a SIM.
	 * Allow 16 ccbs in the ccb pool for it.  This should
	 * give decent parallelism when we probe buses and
	 * perform other XPT functions.
	 */
	devq = cam_simq_alloc(16);
	xpt_sim = cam_sim_alloc(xptaction,
				xptpoll,
				"xpt",
				/*softc*/NULL,
				/*unit*/0,
				/*mtx*/&xsoftc.xpt_lock,
				/*max_dev_transactions*/0,
				/*max_tagged_dev_transactions*/0,
				devq);
	if (xpt_sim == NULL)
		return (ENOMEM);

	mtx_lock(&xsoftc.xpt_lock);
	if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) {
		mtx_unlock(&xsoftc.xpt_lock);
		printf("xpt_init: xpt_bus_register failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	mtx_unlock(&xsoftc.xpt_lock);

	/*
	 * Looking at the XPT from the SIM layer, the XPT is
	 * the equivalent of a peripheral driver.  Allocate
	 * a peripheral driver entry for us.
	 */
	if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
				      CAM_TARGET_WILDCARD,
				      CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
		printf("xpt_init: xpt_create_path failed with status %#x,"
		       " failing attach\n", status);
		return (EINVAL);
	}
	xpt_path_lock(path);
	cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
			 path, NULL, 0, xpt_sim);
	xpt_path_unlock(path);
	xpt_free_path(path);

	if (cam_num_doneqs < 1)
		cam_num_doneqs = 1 + mp_ncpus / 6;
	else if (cam_num_doneqs > MAXCPU)
		cam_num_doneqs = MAXCPU;
	for (i = 0; i < cam_num_doneqs; i++) {
		mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL,
		    MTX_DEF);
		STAILQ_INIT(&cam_doneqs[i].cam_doneq);
		error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i],
		    &cam_proc, NULL, 0, 0, "cam", "doneq%d", i);
		if (error != 0) {
			cam_num_doneqs = i;
			break;
		}
	}
	if (cam_num_doneqs < 1) {
		printf("xpt_init: Cannot init completion queues "
		       "- failing attach\n");
		return (ENOMEM);
	}
	/*
	 * Register a callback for when interrupts are enabled.
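	 * The intrhook defers xpt_config(), and with it the initial bus
	 * scans, until interrupts are working, since the probes issued
	 * during configuration rely on functioning SIM interrupt handlers.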
965 */ 966 xsoftc.xpt_config_hook = 967 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 968 M_CAMXPT, M_NOWAIT | M_ZERO); 969 if (xsoftc.xpt_config_hook == NULL) { 970 printf("xpt_init: Cannot malloc config hook " 971 "- failing attach\n"); 972 return (ENOMEM); 973 } 974 xsoftc.xpt_config_hook->ich_func = xpt_config; 975 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { 976 free (xsoftc.xpt_config_hook, M_CAMXPT); 977 printf("xpt_init: config_intrhook_establish failed " 978 "- failing attach\n"); 979 } 980 981 return (0); 982 } 983 984 static cam_status 985 xptregister(struct cam_periph *periph, void *arg) 986 { 987 struct cam_sim *xpt_sim; 988 989 if (periph == NULL) { 990 printf("xptregister: periph was NULL!!\n"); 991 return(CAM_REQ_CMP_ERR); 992 } 993 994 xpt_sim = (struct cam_sim *)arg; 995 xpt_sim->softc = periph; 996 xpt_periph = periph; 997 periph->softc = NULL; 998 999 return(CAM_REQ_CMP); 1000 } 1001 1002 int32_t 1003 xpt_add_periph(struct cam_periph *periph) 1004 { 1005 struct cam_ed *device; 1006 int32_t status; 1007 1008 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 1009 device = periph->path->device; 1010 status = CAM_REQ_CMP; 1011 if (device != NULL) { 1012 mtx_lock(&device->target->bus->eb_mtx); 1013 device->generation++; 1014 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 1015 mtx_unlock(&device->target->bus->eb_mtx); 1016 atomic_add_32(&xsoftc.xpt_generation, 1); 1017 } 1018 1019 return (status); 1020 } 1021 1022 void 1023 xpt_remove_periph(struct cam_periph *periph) 1024 { 1025 struct cam_ed *device; 1026 1027 device = periph->path->device; 1028 if (device != NULL) { 1029 mtx_lock(&device->target->bus->eb_mtx); 1030 device->generation++; 1031 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1032 mtx_unlock(&device->target->bus->eb_mtx); 1033 atomic_add_32(&xsoftc.xpt_generation, 1); 1034 } 1035 } 1036 1037 1038 void 1039 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1040 { 1041 struct cam_path *path = periph->path; 1042 struct xpt_proto *proto; 1043 1044 cam_periph_assert(periph, MA_OWNED); 1045 periph->flags |= CAM_PERIPH_ANNOUNCED; 1046 1047 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1048 periph->periph_name, periph->unit_number, 1049 path->bus->sim->sim_name, 1050 path->bus->sim->unit_number, 1051 path->bus->sim->bus_id, 1052 path->bus->path_id, 1053 path->target->target_id, 1054 (uintmax_t)path->device->lun_id); 1055 printf("%s%d: ", periph->periph_name, periph->unit_number); 1056 proto = xpt_proto_find(path->device->protocol); 1057 if (proto) 1058 proto->ops->announce(path->device); 1059 else 1060 printf("%s%d: Unknown protocol device %d\n", 1061 periph->periph_name, periph->unit_number, 1062 path->device->protocol); 1063 if (path->device->serial_num_len > 0) { 1064 /* Don't wrap the screen - print only the first 60 chars */ 1065 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1066 periph->unit_number, path->device->serial_num); 1067 } 1068 /* Announce transport details. */ 1069 path->bus->xport->ops->announce(periph); 1070 /* Announce command queueing. */ 1071 if (path->device->inq_flags & SID_CmdQue 1072 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1073 printf("%s%d: Command Queueing enabled\n", 1074 periph->periph_name, periph->unit_number); 1075 } 1076 /* Announce caller's details if they've passed in. 
	if (announce_string != NULL)
		printf("%s%d: %s\n", periph->periph_name,
		       periph->unit_number, announce_string);
}

void
xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string)
{
	if (quirks != 0) {
		printf("%s%d: quirks=0x%b\n", periph->periph_name,
		    periph->unit_number, quirks, bit_string);
	}
}

void
xpt_denounce_periph(struct cam_periph *periph)
{
	struct	cam_path *path = periph->path;
	struct	xpt_proto *proto;

	cam_periph_assert(periph, MA_OWNED);
	printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n",
	       periph->periph_name, periph->unit_number,
	       path->bus->sim->sim_name,
	       path->bus->sim->unit_number,
	       path->bus->sim->bus_id,
	       path->bus->path_id,
	       path->target->target_id,
	       (uintmax_t)path->device->lun_id);
	printf("%s%d: ", periph->periph_name, periph->unit_number);
	proto = xpt_proto_find(path->device->protocol);
	if (proto)
		proto->ops->denounce(path->device);
	else
		printf("%s%d: Unknown protocol device %d\n",
		    periph->periph_name, periph->unit_number,
		    path->device->protocol);
	if (path->device->serial_num_len > 0)
		printf(" s/n %.60s", path->device->serial_num);
	printf(" detached\n");
}

int
xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path)
{
	int ret = -1, l, o;
	struct ccb_dev_advinfo cdai;
	struct scsi_vpd_id_descriptor *idd;

	xpt_path_assert(path, MA_OWNED);

	memset(&cdai, 0, sizeof(cdai));
	xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL);
	cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
	cdai.flags = CDAI_FLAG_NONE;
	cdai.bufsiz = len;

	if (!strcmp(attr, "GEOM::ident"))
		cdai.buftype = CDAI_TYPE_SERIAL_NUM;
	else if (!strcmp(attr, "GEOM::physpath"))
		cdai.buftype = CDAI_TYPE_PHYS_PATH;
	else if (strcmp(attr, "GEOM::lunid") == 0 ||
	    strcmp(attr, "GEOM::lunname") == 0) {
		cdai.buftype = CDAI_TYPE_SCSI_DEVID;
		cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN;
	} else
		goto out;

	cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO);
	if (cdai.buf == NULL) {
		ret = ENOMEM;
		goto out;
	}
	xpt_action((union ccb *)&cdai); /* can only be synchronous */
	if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
	if (cdai.provsiz == 0)
		goto out;
	if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) {
		if (strcmp(attr, "GEOM::lunid") == 0) {
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_naa);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_eui64);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_uuid);
			if (idd == NULL)
				idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
				    cdai.provsiz, scsi_devid_is_lun_md5);
		} else
			idd = NULL;
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_t10);
		if (idd == NULL)
			idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf,
			    cdai.provsiz, scsi_devid_is_lun_name);
		if (idd == NULL)
			goto out;
		ret = 0;
		if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) {
			if (idd->length < len) {
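				/*
				 * Copy the ASCII identifier, mapping any
				 * embedded NUL padding bytes to spaces, and
				 * NUL-terminate the result.
				 */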
				for (l = 0; l < idd->length; l++)
					buf[l] = idd->identifier[l] ?
					    idd->identifier[l] : ' ';
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) {
			l = strnlen(idd->identifier, idd->length);
			if (l < len) {
				bcopy(idd->identifier, buf, l);
				buf[l] = 0;
			} else
				ret = EFAULT;
		} else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID
		    && idd->identifier[0] == 0x10) {
			if ((idd->length - 2) * 2 + 4 < len) {
				for (l = 2, o = 0; l < idd->length; l++) {
					if (l == 6 || l == 8 ||
					    l == 10 || l == 12)
						o += sprintf(buf + o, "-");
					o += sprintf(buf + o, "%02x",
					    idd->identifier[l]);
				}
			} else
				ret = EFAULT;
		} else {
			if (idd->length * 2 < len) {
				for (l = 0; l < idd->length; l++)
					sprintf(buf + l * 2, "%02x",
					    idd->identifier[l]);
			} else
				ret = EFAULT;
		}
	} else {
		ret = 0;
		if (strlcpy(buf, cdai.buf, len) >= len)
			ret = EFAULT;
	}

out:
	if (cdai.buf != NULL)
		free(cdai.buf, M_CAMXPT);
	return ret;
}

static dev_match_ret
xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
	    struct cam_eb *bus)
{
	dev_match_ret retval;
	u_int i;

	retval = DM_RET_NONE;

	/*
	 * If we aren't given something to match against, that's an error.
	 */
	if (bus == NULL)
		return(DM_RET_ERROR);

	/*
	 * If there are no match entries, then this bus matches no
	 * matter what.
	 */
	if ((patterns == NULL) || (num_patterns == 0))
		return(DM_RET_DESCEND | DM_RET_COPY);

	for (i = 0; i < num_patterns; i++) {
		struct bus_match_pattern *cur_pattern;

		/*
		 * If the pattern in question isn't for a bus node, we
		 * aren't interested.  However, we do indicate to the
		 * calling routine that we should continue descending the
		 * tree, since the user wants to match against lower-level
		 * EDT elements.
		 */
		if (patterns[i].type != DEV_MATCH_BUS) {
			if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
				retval |= DM_RET_DESCEND;
			continue;
		}

		cur_pattern = &patterns[i].pattern.bus_pattern;

		/*
		 * If they want to match any bus node, we give them any
		 * device node.
		 */
		if (cur_pattern->flags == BUS_MATCH_ANY) {
			/* set the copy flag */
			retval |= DM_RET_COPY;

			/*
			 * If we've already decided on an action, go ahead
			 * and return.
			 */
			if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
				return(retval);
		}

		/*
		 * Not sure why someone would do this...
		 */
		if (cur_pattern->flags == BUS_MATCH_NONE)
			continue;

		if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
		 && (cur_pattern->path_id != bus->path_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
		 && (cur_pattern->bus_id != bus->sim->bus_id))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
		 && (cur_pattern->unit_number != bus->sim->unit_number))
			continue;

		if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
		 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
			     DEV_IDLEN) != 0))
			continue;

		/*
		 * If we get to this point, the user definitely wants
		 * information on this bus.  So tell the caller to copy the
		 * data out.
1309 */ 1310 retval |= DM_RET_COPY; 1311 1312 /* 1313 * If the return action has been set to descend, then we 1314 * know that we've already seen a non-bus matching 1315 * expression, therefore we need to further descend the tree. 1316 * This won't change by continuing around the loop, so we 1317 * go ahead and return. If we haven't seen a non-bus 1318 * matching expression, we keep going around the loop until 1319 * we exhaust the matching expressions. We'll set the stop 1320 * flag once we fall out of the loop. 1321 */ 1322 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1323 return(retval); 1324 } 1325 1326 /* 1327 * If the return action hasn't been set to descend yet, that means 1328 * we haven't seen anything other than bus matching patterns. So 1329 * tell the caller to stop descending the tree -- the user doesn't 1330 * want to match against lower level tree elements. 1331 */ 1332 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1333 retval |= DM_RET_STOP; 1334 1335 return(retval); 1336 } 1337 1338 static dev_match_ret 1339 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1340 struct cam_ed *device) 1341 { 1342 dev_match_ret retval; 1343 u_int i; 1344 1345 retval = DM_RET_NONE; 1346 1347 /* 1348 * If we aren't given something to match against, that's an error. 1349 */ 1350 if (device == NULL) 1351 return(DM_RET_ERROR); 1352 1353 /* 1354 * If there are no match entries, then this device matches no 1355 * matter what. 1356 */ 1357 if ((patterns == NULL) || (num_patterns == 0)) 1358 return(DM_RET_DESCEND | DM_RET_COPY); 1359 1360 for (i = 0; i < num_patterns; i++) { 1361 struct device_match_pattern *cur_pattern; 1362 struct scsi_vpd_device_id *device_id_page; 1363 1364 /* 1365 * If the pattern in question isn't for a device node, we 1366 * aren't interested. 1367 */ 1368 if (patterns[i].type != DEV_MATCH_DEVICE) { 1369 if ((patterns[i].type == DEV_MATCH_PERIPH) 1370 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1371 retval |= DM_RET_DESCEND; 1372 continue; 1373 } 1374 1375 cur_pattern = &patterns[i].pattern.device_pattern; 1376 1377 /* Error out if mutually exclusive options are specified. */ 1378 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1379 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1380 return(DM_RET_ERROR); 1381 1382 /* 1383 * If they want to match any device node, we give them any 1384 * device node. 1385 */ 1386 if (cur_pattern->flags == DEV_MATCH_ANY) 1387 goto copy_dev_node; 1388 1389 /* 1390 * Not sure why someone would do this... 
1391 */ 1392 if (cur_pattern->flags == DEV_MATCH_NONE) 1393 continue; 1394 1395 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1396 && (cur_pattern->path_id != device->target->bus->path_id)) 1397 continue; 1398 1399 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1400 && (cur_pattern->target_id != device->target->target_id)) 1401 continue; 1402 1403 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1404 && (cur_pattern->target_lun != device->lun_id)) 1405 continue; 1406 1407 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1408 && (cam_quirkmatch((caddr_t)&device->inq_data, 1409 (caddr_t)&cur_pattern->data.inq_pat, 1410 1, sizeof(cur_pattern->data.inq_pat), 1411 scsi_static_inquiry_match) == NULL)) 1412 continue; 1413 1414 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1415 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1416 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1417 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1418 device->device_id_len 1419 - SVPD_DEVICE_ID_HDR_LEN, 1420 cur_pattern->data.devid_pat.id, 1421 cur_pattern->data.devid_pat.id_len) != 0)) 1422 continue; 1423 1424 copy_dev_node: 1425 /* 1426 * If we get to this point, the user definitely wants 1427 * information on this device. So tell the caller to copy 1428 * the data out. 1429 */ 1430 retval |= DM_RET_COPY; 1431 1432 /* 1433 * If the return action has been set to descend, then we 1434 * know that we've already seen a peripheral matching 1435 * expression, therefore we need to further descend the tree. 1436 * This won't change by continuing around the loop, so we 1437 * go ahead and return. If we haven't seen a peripheral 1438 * matching expression, we keep going around the loop until 1439 * we exhaust the matching expressions. We'll set the stop 1440 * flag once we fall out of the loop. 1441 */ 1442 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1443 return(retval); 1444 } 1445 1446 /* 1447 * If the return action hasn't been set to descend yet, that means 1448 * we haven't seen any peripheral matching patterns. So tell the 1449 * caller to stop descending the tree -- the user doesn't want to 1450 * match against lower level tree elements. 1451 */ 1452 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1453 retval |= DM_RET_STOP; 1454 1455 return(retval); 1456 } 1457 1458 /* 1459 * Match a single peripheral against any number of match patterns. 1460 */ 1461 static dev_match_ret 1462 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1463 struct cam_periph *periph) 1464 { 1465 dev_match_ret retval; 1466 u_int i; 1467 1468 /* 1469 * If we aren't given something to match against, that's an error. 1470 */ 1471 if (periph == NULL) 1472 return(DM_RET_ERROR); 1473 1474 /* 1475 * If there are no match entries, then this peripheral matches no 1476 * matter what. 1477 */ 1478 if ((patterns == NULL) || (num_patterns == 0)) 1479 return(DM_RET_STOP | DM_RET_COPY); 1480 1481 /* 1482 * There aren't any nodes below a peripheral node, so there's no 1483 * reason to descend the tree any further. 1484 */ 1485 retval = DM_RET_STOP; 1486 1487 for (i = 0; i < num_patterns; i++) { 1488 struct periph_match_pattern *cur_pattern; 1489 1490 /* 1491 * If the pattern in question isn't for a peripheral, we 1492 * aren't interested. 1493 */ 1494 if (patterns[i].type != DEV_MATCH_PERIPH) 1495 continue; 1496 1497 cur_pattern = &patterns[i].pattern.periph_pattern; 1498 1499 /* 1500 * If they want to match on anything, then we will do so. 
1501 */ 1502 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 1503 /* set the copy flag */ 1504 retval |= DM_RET_COPY; 1505 1506 /* 1507 * We've already set the return action to stop, 1508 * since there are no nodes below peripherals in 1509 * the tree. 1510 */ 1511 return(retval); 1512 } 1513 1514 /* 1515 * Not sure why someone would do this... 1516 */ 1517 if (cur_pattern->flags == PERIPH_MATCH_NONE) 1518 continue; 1519 1520 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1521 && (cur_pattern->path_id != periph->path->bus->path_id)) 1522 continue; 1523 1524 /* 1525 * For the target and lun id's, we have to make sure the 1526 * target and lun pointers aren't NULL. The xpt peripheral 1527 * has a wildcard target and device. 1528 */ 1529 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1530 && ((periph->path->target == NULL) 1531 ||(cur_pattern->target_id != periph->path->target->target_id))) 1532 continue; 1533 1534 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1535 && ((periph->path->device == NULL) 1536 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1537 continue; 1538 1539 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1540 && (cur_pattern->unit_number != periph->unit_number)) 1541 continue; 1542 1543 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1544 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1545 DEV_IDLEN) != 0)) 1546 continue; 1547 1548 /* 1549 * If we get to this point, the user definitely wants 1550 * information on this peripheral. So tell the caller to 1551 * copy the data out. 1552 */ 1553 retval |= DM_RET_COPY; 1554 1555 /* 1556 * The return action has already been set to stop, since 1557 * peripherals don't have any nodes below them in the EDT. 1558 */ 1559 return(retval); 1560 } 1561 1562 /* 1563 * If we get to this point, the peripheral that was passed in 1564 * doesn't match any of the patterns. 1565 */ 1566 return(retval); 1567 } 1568 1569 static int 1570 xptedtbusfunc(struct cam_eb *bus, void *arg) 1571 { 1572 struct ccb_dev_match *cdm; 1573 struct cam_et *target; 1574 dev_match_ret retval; 1575 1576 cdm = (struct ccb_dev_match *)arg; 1577 1578 /* 1579 * If our position is for something deeper in the tree, that means 1580 * that we've already seen this node. So, we keep going down. 1581 */ 1582 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1583 && (cdm->pos.cookie.bus == bus) 1584 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1585 && (cdm->pos.cookie.target != NULL)) 1586 retval = DM_RET_DESCEND; 1587 else 1588 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1589 1590 /* 1591 * If we got an error, bail out of the search. 1592 */ 1593 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1594 cdm->status = CAM_DEV_MATCH_ERROR; 1595 return(0); 1596 } 1597 1598 /* 1599 * If the copy flag is set, copy this bus out. 1600 */ 1601 if (retval & DM_RET_COPY) { 1602 int spaceleft, j; 1603 1604 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1605 sizeof(struct dev_match_result)); 1606 1607 /* 1608 * If we don't have enough space to put in another 1609 * match result, save our position and tell the 1610 * user there are more devices to check. 
1611 */ 1612 if (spaceleft < sizeof(struct dev_match_result)) { 1613 bzero(&cdm->pos, sizeof(cdm->pos)); 1614 cdm->pos.position_type = 1615 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1616 1617 cdm->pos.cookie.bus = bus; 1618 cdm->pos.generations[CAM_BUS_GENERATION]= 1619 xsoftc.bus_generation; 1620 cdm->status = CAM_DEV_MATCH_MORE; 1621 return(0); 1622 } 1623 j = cdm->num_matches; 1624 cdm->num_matches++; 1625 cdm->matches[j].type = DEV_MATCH_BUS; 1626 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1627 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1628 cdm->matches[j].result.bus_result.unit_number = 1629 bus->sim->unit_number; 1630 strncpy(cdm->matches[j].result.bus_result.dev_name, 1631 bus->sim->sim_name, DEV_IDLEN); 1632 } 1633 1634 /* 1635 * If the user is only interested in buses, there's no 1636 * reason to descend to the next level in the tree. 1637 */ 1638 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1639 return(1); 1640 1641 /* 1642 * If there is a target generation recorded, check it to 1643 * make sure the target list hasn't changed. 1644 */ 1645 mtx_lock(&bus->eb_mtx); 1646 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1647 && (cdm->pos.cookie.bus == bus) 1648 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1649 && (cdm->pos.cookie.target != NULL)) { 1650 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1651 bus->generation)) { 1652 mtx_unlock(&bus->eb_mtx); 1653 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1654 return (0); 1655 } 1656 target = (struct cam_et *)cdm->pos.cookie.target; 1657 target->refcount++; 1658 } else 1659 target = NULL; 1660 mtx_unlock(&bus->eb_mtx); 1661 1662 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1663 } 1664 1665 static int 1666 xptedttargetfunc(struct cam_et *target, void *arg) 1667 { 1668 struct ccb_dev_match *cdm; 1669 struct cam_eb *bus; 1670 struct cam_ed *device; 1671 1672 cdm = (struct ccb_dev_match *)arg; 1673 bus = target->bus; 1674 1675 /* 1676 * If there is a device list generation recorded, check it to 1677 * make sure the device list hasn't changed. 1678 */ 1679 mtx_lock(&bus->eb_mtx); 1680 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1681 && (cdm->pos.cookie.bus == bus) 1682 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1683 && (cdm->pos.cookie.target == target) 1684 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1685 && (cdm->pos.cookie.device != NULL)) { 1686 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1687 target->generation) { 1688 mtx_unlock(&bus->eb_mtx); 1689 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1690 return(0); 1691 } 1692 device = (struct cam_ed *)cdm->pos.cookie.device; 1693 device->refcount++; 1694 } else 1695 device = NULL; 1696 mtx_unlock(&bus->eb_mtx); 1697 1698 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1699 } 1700 1701 static int 1702 xptedtdevicefunc(struct cam_ed *device, void *arg) 1703 { 1704 struct cam_eb *bus; 1705 struct cam_periph *periph; 1706 struct ccb_dev_match *cdm; 1707 dev_match_ret retval; 1708 1709 cdm = (struct ccb_dev_match *)arg; 1710 bus = device->target->bus; 1711 1712 /* 1713 * If our position is for something deeper in the tree, that means 1714 * that we've already seen this node. So, we keep going down. 
1715 */ 1716 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1717 && (cdm->pos.cookie.device == device) 1718 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1719 && (cdm->pos.cookie.periph != NULL)) 1720 retval = DM_RET_DESCEND; 1721 else 1722 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1723 device); 1724 1725 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1726 cdm->status = CAM_DEV_MATCH_ERROR; 1727 return(0); 1728 } 1729 1730 /* 1731 * If the copy flag is set, copy this device out. 1732 */ 1733 if (retval & DM_RET_COPY) { 1734 int spaceleft, j; 1735 1736 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1737 sizeof(struct dev_match_result)); 1738 1739 /* 1740 * If we don't have enough space to put in another 1741 * match result, save our position and tell the 1742 * user there are more devices to check. 1743 */ 1744 if (spaceleft < sizeof(struct dev_match_result)) { 1745 bzero(&cdm->pos, sizeof(cdm->pos)); 1746 cdm->pos.position_type = 1747 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1748 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1749 1750 cdm->pos.cookie.bus = device->target->bus; 1751 cdm->pos.generations[CAM_BUS_GENERATION]= 1752 xsoftc.bus_generation; 1753 cdm->pos.cookie.target = device->target; 1754 cdm->pos.generations[CAM_TARGET_GENERATION] = 1755 device->target->bus->generation; 1756 cdm->pos.cookie.device = device; 1757 cdm->pos.generations[CAM_DEV_GENERATION] = 1758 device->target->generation; 1759 cdm->status = CAM_DEV_MATCH_MORE; 1760 return(0); 1761 } 1762 j = cdm->num_matches; 1763 cdm->num_matches++; 1764 cdm->matches[j].type = DEV_MATCH_DEVICE; 1765 cdm->matches[j].result.device_result.path_id = 1766 device->target->bus->path_id; 1767 cdm->matches[j].result.device_result.target_id = 1768 device->target->target_id; 1769 cdm->matches[j].result.device_result.target_lun = 1770 device->lun_id; 1771 cdm->matches[j].result.device_result.protocol = 1772 device->protocol; 1773 bcopy(&device->inq_data, 1774 &cdm->matches[j].result.device_result.inq_data, 1775 sizeof(struct scsi_inquiry_data)); 1776 bcopy(&device->ident_data, 1777 &cdm->matches[j].result.device_result.ident_data, 1778 sizeof(struct ata_params)); 1779 1780 /* Let the user know whether this device is unconfigured */ 1781 if (device->flags & CAM_DEV_UNCONFIGURED) 1782 cdm->matches[j].result.device_result.flags = 1783 DEV_RESULT_UNCONFIGURED; 1784 else 1785 cdm->matches[j].result.device_result.flags = 1786 DEV_RESULT_NOFLAG; 1787 } 1788 1789 /* 1790 * If the user isn't interested in peripherals, don't descend 1791 * the tree any further. 1792 */ 1793 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1794 return(1); 1795 1796 /* 1797 * If there is a peripheral list generation recorded, make sure 1798 * it hasn't changed. 
1799 */ 1800 xpt_lock_buses(); 1801 mtx_lock(&bus->eb_mtx); 1802 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1803 && (cdm->pos.cookie.bus == bus) 1804 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1805 && (cdm->pos.cookie.target == device->target) 1806 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1807 && (cdm->pos.cookie.device == device) 1808 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1809 && (cdm->pos.cookie.periph != NULL)) { 1810 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1811 device->generation) { 1812 mtx_unlock(&bus->eb_mtx); 1813 xpt_unlock_buses(); 1814 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1815 return(0); 1816 } 1817 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1818 periph->refcount++; 1819 } else 1820 periph = NULL; 1821 mtx_unlock(&bus->eb_mtx); 1822 xpt_unlock_buses(); 1823 1824 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1825 } 1826 1827 static int 1828 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1829 { 1830 struct ccb_dev_match *cdm; 1831 dev_match_ret retval; 1832 1833 cdm = (struct ccb_dev_match *)arg; 1834 1835 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1836 1837 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1838 cdm->status = CAM_DEV_MATCH_ERROR; 1839 return(0); 1840 } 1841 1842 /* 1843 * If the copy flag is set, copy this peripheral out. 1844 */ 1845 if (retval & DM_RET_COPY) { 1846 int spaceleft, j; 1847 1848 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1849 sizeof(struct dev_match_result)); 1850 1851 /* 1852 * If we don't have enough space to put in another 1853 * match result, save our position and tell the 1854 * user there are more devices to check. 1855 */ 1856 if (spaceleft < sizeof(struct dev_match_result)) { 1857 bzero(&cdm->pos, sizeof(cdm->pos)); 1858 cdm->pos.position_type = 1859 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1860 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1861 CAM_DEV_POS_PERIPH; 1862 1863 cdm->pos.cookie.bus = periph->path->bus; 1864 cdm->pos.generations[CAM_BUS_GENERATION]= 1865 xsoftc.bus_generation; 1866 cdm->pos.cookie.target = periph->path->target; 1867 cdm->pos.generations[CAM_TARGET_GENERATION] = 1868 periph->path->bus->generation; 1869 cdm->pos.cookie.device = periph->path->device; 1870 cdm->pos.generations[CAM_DEV_GENERATION] = 1871 periph->path->target->generation; 1872 cdm->pos.cookie.periph = periph; 1873 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1874 periph->path->device->generation; 1875 cdm->status = CAM_DEV_MATCH_MORE; 1876 return(0); 1877 } 1878 1879 j = cdm->num_matches; 1880 cdm->num_matches++; 1881 cdm->matches[j].type = DEV_MATCH_PERIPH; 1882 cdm->matches[j].result.periph_result.path_id = 1883 periph->path->bus->path_id; 1884 cdm->matches[j].result.periph_result.target_id = 1885 periph->path->target->target_id; 1886 cdm->matches[j].result.periph_result.target_lun = 1887 periph->path->device->lun_id; 1888 cdm->matches[j].result.periph_result.unit_number = 1889 periph->unit_number; 1890 strncpy(cdm->matches[j].result.periph_result.periph_name, 1891 periph->periph_name, DEV_IDLEN); 1892 } 1893 1894 return(1); 1895 } 1896 1897 static int 1898 xptedtmatch(struct ccb_dev_match *cdm) 1899 { 1900 struct cam_eb *bus; 1901 int ret; 1902 1903 cdm->num_matches = 0; 1904 1905 /* 1906 * Check the bus list generation. If it has changed, the user 1907 * needs to reset everything and start over. 
1908 */ 1909 xpt_lock_buses(); 1910 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1911 && (cdm->pos.cookie.bus != NULL)) { 1912 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1913 xsoftc.bus_generation) { 1914 xpt_unlock_buses(); 1915 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1916 return(0); 1917 } 1918 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1919 bus->refcount++; 1920 } else 1921 bus = NULL; 1922 xpt_unlock_buses(); 1923 1924 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1925 1926 /* 1927 * If we get back 0, that means that we had to stop before fully 1928 * traversing the EDT. It also means that one of the subroutines 1929 * has set the status field to the proper value. If we get back 1, 1930 * we've fully traversed the EDT and copied out any matching entries. 1931 */ 1932 if (ret == 1) 1933 cdm->status = CAM_DEV_MATCH_LAST; 1934 1935 return(ret); 1936 } 1937 1938 static int 1939 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1940 { 1941 struct cam_periph *periph; 1942 struct ccb_dev_match *cdm; 1943 1944 cdm = (struct ccb_dev_match *)arg; 1945 1946 xpt_lock_buses(); 1947 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1948 && (cdm->pos.cookie.pdrv == pdrv) 1949 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1950 && (cdm->pos.cookie.periph != NULL)) { 1951 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1952 (*pdrv)->generation) { 1953 xpt_unlock_buses(); 1954 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1955 return(0); 1956 } 1957 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1958 periph->refcount++; 1959 } else 1960 periph = NULL; 1961 xpt_unlock_buses(); 1962 1963 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1964 } 1965 1966 static int 1967 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1968 { 1969 struct ccb_dev_match *cdm; 1970 dev_match_ret retval; 1971 1972 cdm = (struct ccb_dev_match *)arg; 1973 1974 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1975 1976 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1977 cdm->status = CAM_DEV_MATCH_ERROR; 1978 return(0); 1979 } 1980 1981 /* 1982 * If the copy flag is set, copy this peripheral out. 1983 */ 1984 if (retval & DM_RET_COPY) { 1985 int spaceleft, j; 1986 1987 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1988 sizeof(struct dev_match_result)); 1989 1990 /* 1991 * If we don't have enough space to put in another 1992 * match result, save our position and tell the 1993 * user there are more devices to check. 1994 */ 1995 if (spaceleft < sizeof(struct dev_match_result)) { 1996 struct periph_driver **pdrv; 1997 1998 pdrv = NULL; 1999 bzero(&cdm->pos, sizeof(cdm->pos)); 2000 cdm->pos.position_type = 2001 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2002 CAM_DEV_POS_PERIPH; 2003 2004 /* 2005 * This may look a bit non-sensical, but it is 2006 * actually quite logical. There are very few 2007 * peripheral drivers, and bloating every peripheral 2008 * structure with a pointer back to its parent 2009 * peripheral driver linker set entry would cost 2010 * more in the long run than doing this quick lookup. 2011 */ 2012 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2013 if (strcmp((*pdrv)->driver_name, 2014 periph->periph_name) == 0) 2015 break; 2016 } 2017 2018 if (*pdrv == NULL) { 2019 cdm->status = CAM_DEV_MATCH_ERROR; 2020 return(0); 2021 } 2022 2023 cdm->pos.cookie.pdrv = pdrv; 2024 /* 2025 * The periph generation slot does double duty, as 2026 * does the periph pointer slot. They are used for 2027 * both edt and pdrv lookups and positioning. 
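	 * Concretely: for EDT-style walks the slot holds device->generation
	 * (see xptedtperiphfunc() above), while for peripheral driver list
	 * walks, as here, it holds (*pdrv)->generation instead.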
2028 */ 2029 cdm->pos.cookie.periph = periph; 2030 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2031 (*pdrv)->generation; 2032 cdm->status = CAM_DEV_MATCH_MORE; 2033 return(0); 2034 } 2035 2036 j = cdm->num_matches; 2037 cdm->num_matches++; 2038 cdm->matches[j].type = DEV_MATCH_PERIPH; 2039 cdm->matches[j].result.periph_result.path_id = 2040 periph->path->bus->path_id; 2041 2042 /* 2043 * The transport layer peripheral doesn't have a target or 2044 * lun. 2045 */ 2046 if (periph->path->target) 2047 cdm->matches[j].result.periph_result.target_id = 2048 periph->path->target->target_id; 2049 else 2050 cdm->matches[j].result.periph_result.target_id = 2051 CAM_TARGET_WILDCARD; 2052 2053 if (periph->path->device) 2054 cdm->matches[j].result.periph_result.target_lun = 2055 periph->path->device->lun_id; 2056 else 2057 cdm->matches[j].result.periph_result.target_lun = 2058 CAM_LUN_WILDCARD; 2059 2060 cdm->matches[j].result.periph_result.unit_number = 2061 periph->unit_number; 2062 strncpy(cdm->matches[j].result.periph_result.periph_name, 2063 periph->periph_name, DEV_IDLEN); 2064 } 2065 2066 return(1); 2067 } 2068 2069 static int 2070 xptperiphlistmatch(struct ccb_dev_match *cdm) 2071 { 2072 int ret; 2073 2074 cdm->num_matches = 0; 2075 2076 /* 2077 * At the corresponding point in the edt traversal function, we check 2078 * the bus list generation to make sure that no buses have been added 2079 * or removed since the user last sent an XPT_DEV_MATCH ccb through. 2080 * For the peripheral driver list traversal function, however, we 2081 * don't have to worry about new peripheral driver types coming or 2082 * going; they're in a linker set, and therefore can't change 2083 * without a recompile. 2084 */ 2085 2086 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2087 && (cdm->pos.cookie.pdrv != NULL)) 2088 ret = xptpdrvtraverse( 2089 (struct periph_driver **)cdm->pos.cookie.pdrv, 2090 xptplistpdrvfunc, cdm); 2091 else 2092 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2093 2094 /* 2095 * If we get back 0, that means that we had to stop before fully 2096 * traversing the peripheral driver list. It also means that one of 2097 * the subroutines has set the status field to the proper value. If 2098 * we get back 1, we've fully traversed the peripheral driver list 2099 * and copied out any matching entries.
2100 */ 2101 if (ret == 1) 2102 cdm->status = CAM_DEV_MATCH_LAST; 2103 2104 return(ret); 2105 } 2106 2107 static int 2108 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2109 { 2110 struct cam_eb *bus, *next_bus; 2111 int retval; 2112 2113 retval = 1; 2114 if (start_bus) 2115 bus = start_bus; 2116 else { 2117 xpt_lock_buses(); 2118 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2119 if (bus == NULL) { 2120 xpt_unlock_buses(); 2121 return (retval); 2122 } 2123 bus->refcount++; 2124 xpt_unlock_buses(); 2125 } 2126 for (; bus != NULL; bus = next_bus) { 2127 retval = tr_func(bus, arg); 2128 if (retval == 0) { 2129 xpt_release_bus(bus); 2130 break; 2131 } 2132 xpt_lock_buses(); 2133 next_bus = TAILQ_NEXT(bus, links); 2134 if (next_bus) 2135 next_bus->refcount++; 2136 xpt_unlock_buses(); 2137 xpt_release_bus(bus); 2138 } 2139 return(retval); 2140 } 2141 2142 static int 2143 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2144 xpt_targetfunc_t *tr_func, void *arg) 2145 { 2146 struct cam_et *target, *next_target; 2147 int retval; 2148 2149 retval = 1; 2150 if (start_target) 2151 target = start_target; 2152 else { 2153 mtx_lock(&bus->eb_mtx); 2154 target = TAILQ_FIRST(&bus->et_entries); 2155 if (target == NULL) { 2156 mtx_unlock(&bus->eb_mtx); 2157 return (retval); 2158 } 2159 target->refcount++; 2160 mtx_unlock(&bus->eb_mtx); 2161 } 2162 for (; target != NULL; target = next_target) { 2163 retval = tr_func(target, arg); 2164 if (retval == 0) { 2165 xpt_release_target(target); 2166 break; 2167 } 2168 mtx_lock(&bus->eb_mtx); 2169 next_target = TAILQ_NEXT(target, links); 2170 if (next_target) 2171 next_target->refcount++; 2172 mtx_unlock(&bus->eb_mtx); 2173 xpt_release_target(target); 2174 } 2175 return(retval); 2176 } 2177 2178 static int 2179 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2180 xpt_devicefunc_t *tr_func, void *arg) 2181 { 2182 struct cam_eb *bus; 2183 struct cam_ed *device, *next_device; 2184 int retval; 2185 2186 retval = 1; 2187 bus = target->bus; 2188 if (start_device) 2189 device = start_device; 2190 else { 2191 mtx_lock(&bus->eb_mtx); 2192 device = TAILQ_FIRST(&target->ed_entries); 2193 if (device == NULL) { 2194 mtx_unlock(&bus->eb_mtx); 2195 return (retval); 2196 } 2197 device->refcount++; 2198 mtx_unlock(&bus->eb_mtx); 2199 } 2200 for (; device != NULL; device = next_device) { 2201 mtx_lock(&device->device_mtx); 2202 retval = tr_func(device, arg); 2203 mtx_unlock(&device->device_mtx); 2204 if (retval == 0) { 2205 xpt_release_device(device); 2206 break; 2207 } 2208 mtx_lock(&bus->eb_mtx); 2209 next_device = TAILQ_NEXT(device, links); 2210 if (next_device) 2211 next_device->refcount++; 2212 mtx_unlock(&bus->eb_mtx); 2213 xpt_release_device(device); 2214 } 2215 return(retval); 2216 } 2217 2218 static int 2219 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2220 xpt_periphfunc_t *tr_func, void *arg) 2221 { 2222 struct cam_eb *bus; 2223 struct cam_periph *periph, *next_periph; 2224 int retval; 2225 2226 retval = 1; 2227 2228 bus = device->target->bus; 2229 if (start_periph) 2230 periph = start_periph; 2231 else { 2232 xpt_lock_buses(); 2233 mtx_lock(&bus->eb_mtx); 2234 periph = SLIST_FIRST(&device->periphs); 2235 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2236 periph = SLIST_NEXT(periph, periph_links); 2237 if (periph == NULL) { 2238 mtx_unlock(&bus->eb_mtx); 2239 xpt_unlock_buses(); 2240 return (retval); 2241 } 2242 periph->refcount++; 2243 mtx_unlock(&bus->eb_mtx); 2244 
xpt_unlock_buses(); 2245 } 2246 for (; periph != NULL; periph = next_periph) { 2247 retval = tr_func(periph, arg); 2248 if (retval == 0) { 2249 cam_periph_release_locked(periph); 2250 break; 2251 } 2252 xpt_lock_buses(); 2253 mtx_lock(&bus->eb_mtx); 2254 next_periph = SLIST_NEXT(periph, periph_links); 2255 while (next_periph != NULL && 2256 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2257 next_periph = SLIST_NEXT(next_periph, periph_links); 2258 if (next_periph) 2259 next_periph->refcount++; 2260 mtx_unlock(&bus->eb_mtx); 2261 xpt_unlock_buses(); 2262 cam_periph_release_locked(periph); 2263 } 2264 return(retval); 2265 } 2266 2267 static int 2268 xptpdrvtraverse(struct periph_driver **start_pdrv, 2269 xpt_pdrvfunc_t *tr_func, void *arg) 2270 { 2271 struct periph_driver **pdrv; 2272 int retval; 2273 2274 retval = 1; 2275 2276 /* 2277 * We don't traverse the peripheral driver list like we do the 2278 * other lists, because it is a linker set, and therefore cannot be 2279 * changed during runtime. If the peripheral driver list is ever 2280 * re-done to be something other than a linker set (i.e. it can 2281 * change while the system is running), the list traversal should 2282 * be modified to work like the other traversal functions. 2283 */ 2284 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2285 *pdrv != NULL; pdrv++) { 2286 retval = tr_func(pdrv, arg); 2287 2288 if (retval == 0) 2289 return(retval); 2290 } 2291 2292 return(retval); 2293 } 2294 2295 static int 2296 xptpdperiphtraverse(struct periph_driver **pdrv, 2297 struct cam_periph *start_periph, 2298 xpt_periphfunc_t *tr_func, void *arg) 2299 { 2300 struct cam_periph *periph, *next_periph; 2301 int retval; 2302 2303 retval = 1; 2304 2305 if (start_periph) 2306 periph = start_periph; 2307 else { 2308 xpt_lock_buses(); 2309 periph = TAILQ_FIRST(&(*pdrv)->units); 2310 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2311 periph = TAILQ_NEXT(periph, unit_links); 2312 if (periph == NULL) { 2313 xpt_unlock_buses(); 2314 return (retval); 2315 } 2316 periph->refcount++; 2317 xpt_unlock_buses(); 2318 } 2319 for (; periph != NULL; periph = next_periph) { 2320 cam_periph_lock(periph); 2321 retval = tr_func(periph, arg); 2322 cam_periph_unlock(periph); 2323 if (retval == 0) { 2324 cam_periph_release(periph); 2325 break; 2326 } 2327 xpt_lock_buses(); 2328 next_periph = TAILQ_NEXT(periph, unit_links); 2329 while (next_periph != NULL && 2330 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2331 next_periph = TAILQ_NEXT(next_periph, unit_links); 2332 if (next_periph) 2333 next_periph->refcount++; 2334 xpt_unlock_buses(); 2335 cam_periph_release(periph); 2336 } 2337 return(retval); 2338 } 2339 2340 static int 2341 xptdefbusfunc(struct cam_eb *bus, void *arg) 2342 { 2343 struct xpt_traverse_config *tr_config; 2344 2345 tr_config = (struct xpt_traverse_config *)arg; 2346 2347 if (tr_config->depth == XPT_DEPTH_BUS) { 2348 xpt_busfunc_t *tr_func; 2349 2350 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2351 2352 return(tr_func(bus, tr_config->tr_arg)); 2353 } else 2354 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2355 } 2356 2357 static int 2358 xptdeftargetfunc(struct cam_et *target, void *arg) 2359 { 2360 struct xpt_traverse_config *tr_config; 2361 2362 tr_config = (struct xpt_traverse_config *)arg; 2363 2364 if (tr_config->depth == XPT_DEPTH_TARGET) { 2365 xpt_targetfunc_t *tr_func; 2366 2367 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2368 2369 return(tr_func(target, tr_config->tr_arg)); 2370 } else 2371 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2372 } 2373 2374 static int 2375 xptdefdevicefunc(struct cam_ed *device, void *arg) 2376 { 2377 struct xpt_traverse_config *tr_config; 2378 2379 tr_config = (struct xpt_traverse_config *)arg; 2380 2381 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2382 xpt_devicefunc_t *tr_func; 2383 2384 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2385 2386 return(tr_func(device, tr_config->tr_arg)); 2387 } else 2388 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2389 } 2390 2391 static int 2392 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2393 { 2394 struct xpt_traverse_config *tr_config; 2395 xpt_periphfunc_t *tr_func; 2396 2397 tr_config = (struct xpt_traverse_config *)arg; 2398 2399 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2400 2401 /* 2402 * Unlike the other default functions, we don't check for depth 2403 * here. The peripheral driver level is the last level in the EDT, 2404 * so if we're here, we should execute the function in question. 2405 */ 2406 return(tr_func(periph, tr_config->tr_arg)); 2407 } 2408 2409 /* 2410 * Execute the given function for every bus in the EDT. 2411 */ 2412 static int 2413 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2414 { 2415 struct xpt_traverse_config tr_config; 2416 2417 tr_config.depth = XPT_DEPTH_BUS; 2418 tr_config.tr_func = tr_func; 2419 tr_config.tr_arg = arg; 2420 2421 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2422 } 2423 2424 /* 2425 * Execute the given function for every device in the EDT. 2426 */ 2427 static int 2428 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2429 { 2430 struct xpt_traverse_config tr_config; 2431 2432 tr_config.depth = XPT_DEPTH_DEVICE; 2433 tr_config.tr_func = tr_func; 2434 tr_config.tr_arg = arg; 2435 2436 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2437 } 2438 2439 static int 2440 xptsetasyncfunc(struct cam_ed *device, void *arg) 2441 { 2442 struct cam_path path; 2443 struct ccb_getdev cgd; 2444 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2445 2446 /* 2447 * Don't report unconfigured devices (Wildcard devs, 2448 * devices only for target mode, device instances 2449 * that have been invalidated but are waiting for 2450 * their last reference count to be released). 
2451 */ 2452 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2453 return (1); 2454 2455 xpt_compile_path(&path, 2456 NULL, 2457 device->target->bus->path_id, 2458 device->target->target_id, 2459 device->lun_id); 2460 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2461 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2462 xpt_action((union ccb *)&cgd); 2463 csa->callback(csa->callback_arg, 2464 AC_FOUND_DEVICE, 2465 &path, &cgd); 2466 xpt_release_path(&path); 2467 2468 return(1); 2469 } 2470 2471 static int 2472 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2473 { 2474 struct cam_path path; 2475 struct ccb_pathinq cpi; 2476 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2477 2478 xpt_compile_path(&path, /*periph*/NULL, 2479 bus->path_id, 2480 CAM_TARGET_WILDCARD, 2481 CAM_LUN_WILDCARD); 2482 xpt_path_lock(&path); 2483 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2484 cpi.ccb_h.func_code = XPT_PATH_INQ; 2485 xpt_action((union ccb *)&cpi); 2486 csa->callback(csa->callback_arg, 2487 AC_PATH_REGISTERED, 2488 &path, &cpi); 2489 xpt_path_unlock(&path); 2490 xpt_release_path(&path); 2491 2492 return(1); 2493 } 2494 2495 void 2496 xpt_action(union ccb *start_ccb) 2497 { 2498 2499 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2500 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2501 xpt_action_name(start_ccb->ccb_h.func_code))); 2502 2503 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2504 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2505 } 2506 2507 void 2508 xpt_action_default(union ccb *start_ccb) 2509 { 2510 struct cam_path *path; 2511 struct cam_sim *sim; 2512 int lock; 2513 2514 path = start_ccb->ccb_h.path; 2515 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2516 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2517 xpt_action_name(start_ccb->ccb_h.func_code))); 2518 2519 switch (start_ccb->ccb_h.func_code) { 2520 case XPT_SCSI_IO: 2521 { 2522 struct cam_ed *device; 2523 2524 /* 2525 * For the sake of compatibility with SCSI-1 2526 * devices that may not understand the identify 2527 * message, we include lun information in the 2528 * second byte of all commands. SCSI-1 specifies 2529 * that luns are a 3 bit value and reserves only 3 2530 * bits for lun information in the CDB. Later 2531 * revisions of the SCSI spec allow for more than 8 2532 * luns, but have deprecated lun information in the 2533 * CDB. So, if the lun won't fit, we must omit. 2534 * 2535 * Also be aware that during initial probing for devices, 2536 * the inquiry information is unknown but initialized to 0. 2537 * This means that this code will be exercised while probing 2538 * devices with an ANSI revision greater than 2. 
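	 * As a concrete example of the masking below: for a SCSI-1 style
	 * device at target_lun 3, (3 << 5) == 0x60 is OR'd into the second
	 * CDB byte, placing the lun in bits 5-7 as such devices expect; for
	 * target_lun 8 and above nothing is added, since the value no longer
	 * fits in those three bits.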
2539 */ 2540 device = path->device; 2541 if (device->protocol_version <= SCSI_REV_2 2542 && start_ccb->ccb_h.target_lun < 8 2543 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2544 2545 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2546 start_ccb->ccb_h.target_lun << 5; 2547 } 2548 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2549 } 2550 /* FALLTHROUGH */ 2551 case XPT_TARGET_IO: 2552 case XPT_CONT_TARGET_IO: 2553 start_ccb->csio.sense_resid = 0; 2554 start_ccb->csio.resid = 0; 2555 /* FALLTHROUGH */ 2556 case XPT_ATA_IO: 2557 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2558 start_ccb->ataio.resid = 0; 2559 /* FALLTHROUGH */ 2560 case XPT_NVME_IO: 2561 if (start_ccb->ccb_h.func_code == XPT_NVME_IO) 2562 start_ccb->nvmeio.resid = 0; 2563 /* FALLTHROUGH */ 2564 case XPT_RESET_DEV: 2565 case XPT_ENG_EXEC: 2566 case XPT_SMP_IO: 2567 { 2568 struct cam_devq *devq; 2569 2570 devq = path->bus->sim->devq; 2571 mtx_lock(&devq->send_mtx); 2572 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2573 if (xpt_schedule_devq(devq, path->device) != 0) 2574 xpt_run_devq(devq); 2575 mtx_unlock(&devq->send_mtx); 2576 break; 2577 } 2578 case XPT_CALC_GEOMETRY: 2579 /* Filter out garbage */ 2580 if (start_ccb->ccg.block_size == 0 2581 || start_ccb->ccg.volume_size == 0) { 2582 start_ccb->ccg.cylinders = 0; 2583 start_ccb->ccg.heads = 0; 2584 start_ccb->ccg.secs_per_track = 0; 2585 start_ccb->ccb_h.status = CAM_REQ_CMP; 2586 break; 2587 } 2588 #if defined(__sparc64__) 2589 /* 2590 * For sparc64, we may need adjust the geometry of large 2591 * disks in order to fit the limitations of the 16-bit 2592 * fields of the VTOC8 disk label. 2593 */ 2594 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2595 start_ccb->ccb_h.status = CAM_REQ_CMP; 2596 break; 2597 } 2598 #endif 2599 goto call_sim; 2600 case XPT_ABORT: 2601 { 2602 union ccb* abort_ccb; 2603 2604 abort_ccb = start_ccb->cab.abort_ccb; 2605 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2606 struct cam_ed *device; 2607 struct cam_devq *devq; 2608 2609 device = abort_ccb->ccb_h.path->device; 2610 devq = device->sim->devq; 2611 2612 mtx_lock(&devq->send_mtx); 2613 if (abort_ccb->ccb_h.pinfo.index > 0) { 2614 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2615 abort_ccb->ccb_h.status = 2616 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2617 xpt_freeze_devq_device(device, 1); 2618 mtx_unlock(&devq->send_mtx); 2619 xpt_done(abort_ccb); 2620 start_ccb->ccb_h.status = CAM_REQ_CMP; 2621 break; 2622 } 2623 mtx_unlock(&devq->send_mtx); 2624 2625 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2626 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2627 /* 2628 * We've caught this ccb en route to 2629 * the SIM. Flag it for abort and the 2630 * SIM will do so just before starting 2631 * real work on the CCB. 2632 */ 2633 abort_ccb->ccb_h.status = 2634 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2635 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2636 start_ccb->ccb_h.status = CAM_REQ_CMP; 2637 break; 2638 } 2639 } 2640 if (XPT_FC_IS_QUEUED(abort_ccb) 2641 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2642 /* 2643 * It's already completed but waiting 2644 * for our SWI to get to it. 2645 */ 2646 start_ccb->ccb_h.status = CAM_UA_ABORT; 2647 break; 2648 } 2649 /* 2650 * If we weren't able to take care of the abort request 2651 * in the XPT, pass the request down to the SIM for processing. 
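	 * For reference, a caller typically hands us the abort request dealt
	 * with above along these lines (a sketch only; "ccb_to_abort" stands
	 * for the outstanding CCB and error handling is omitted):
	 *
	 *	union ccb ccb;
	 *
	 *	xpt_setup_ccb(&ccb.ccb_h, path, CAM_PRIORITY_NORMAL);
	 *	ccb.ccb_h.func_code = XPT_ABORT;
	 *	ccb.cab.abort_ccb = ccb_to_abort;
	 *	xpt_action(&ccb);
	 *	... then inspect ccb.ccb_h.status, e.g. CAM_REQ_CMP or CAM_UA_ABORT ...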
2652 */ 2653 } 2654 /* FALLTHROUGH */ 2655 case XPT_ACCEPT_TARGET_IO: 2656 case XPT_EN_LUN: 2657 case XPT_IMMED_NOTIFY: 2658 case XPT_NOTIFY_ACK: 2659 case XPT_RESET_BUS: 2660 case XPT_IMMEDIATE_NOTIFY: 2661 case XPT_NOTIFY_ACKNOWLEDGE: 2662 case XPT_GET_SIM_KNOB_OLD: 2663 case XPT_GET_SIM_KNOB: 2664 case XPT_SET_SIM_KNOB: 2665 case XPT_GET_TRAN_SETTINGS: 2666 case XPT_SET_TRAN_SETTINGS: 2667 case XPT_PATH_INQ: 2668 call_sim: 2669 sim = path->bus->sim; 2670 lock = (mtx_owned(sim->mtx) == 0); 2671 if (lock) 2672 CAM_SIM_LOCK(sim); 2673 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2674 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2675 (*(sim->sim_action))(sim, start_ccb); 2676 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2677 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2678 if (lock) 2679 CAM_SIM_UNLOCK(sim); 2680 break; 2681 case XPT_PATH_STATS: 2682 start_ccb->cpis.last_reset = path->bus->last_reset; 2683 start_ccb->ccb_h.status = CAM_REQ_CMP; 2684 break; 2685 case XPT_GDEV_TYPE: 2686 { 2687 struct cam_ed *dev; 2688 2689 dev = path->device; 2690 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2691 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2692 } else { 2693 struct ccb_getdev *cgd; 2694 2695 cgd = &start_ccb->cgd; 2696 cgd->protocol = dev->protocol; 2697 cgd->inq_data = dev->inq_data; 2698 cgd->ident_data = dev->ident_data; 2699 cgd->inq_flags = dev->inq_flags; 2700 cgd->nvme_data = dev->nvme_data; 2701 cgd->nvme_cdata = dev->nvme_cdata; 2702 cgd->ccb_h.status = CAM_REQ_CMP; 2703 cgd->serial_num_len = dev->serial_num_len; 2704 if ((dev->serial_num_len > 0) 2705 && (dev->serial_num != NULL)) 2706 bcopy(dev->serial_num, cgd->serial_num, 2707 dev->serial_num_len); 2708 } 2709 break; 2710 } 2711 case XPT_GDEV_STATS: 2712 { 2713 struct cam_ed *dev; 2714 2715 dev = path->device; 2716 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2717 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2718 } else { 2719 struct ccb_getdevstats *cgds; 2720 struct cam_eb *bus; 2721 struct cam_et *tar; 2722 struct cam_devq *devq; 2723 2724 cgds = &start_ccb->cgds; 2725 bus = path->bus; 2726 tar = path->target; 2727 devq = bus->sim->devq; 2728 mtx_lock(&devq->send_mtx); 2729 cgds->dev_openings = dev->ccbq.dev_openings; 2730 cgds->dev_active = dev->ccbq.dev_active; 2731 cgds->allocated = dev->ccbq.allocated; 2732 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2733 cgds->held = cgds->allocated - cgds->dev_active - 2734 cgds->queued; 2735 cgds->last_reset = tar->last_reset; 2736 cgds->maxtags = dev->maxtags; 2737 cgds->mintags = dev->mintags; 2738 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2739 cgds->last_reset = bus->last_reset; 2740 mtx_unlock(&devq->send_mtx); 2741 cgds->ccb_h.status = CAM_REQ_CMP; 2742 } 2743 break; 2744 } 2745 case XPT_GDEVLIST: 2746 { 2747 struct cam_periph *nperiph; 2748 struct periph_list *periph_head; 2749 struct ccb_getdevlist *cgdl; 2750 u_int i; 2751 struct cam_ed *device; 2752 int found; 2753 2754 2755 found = 0; 2756 2757 /* 2758 * Don't want anyone mucking with our data. 2759 */ 2760 device = path->device; 2761 periph_head = &device->periphs; 2762 cgdl = &start_ccb->cgdl; 2763 2764 /* 2765 * Check and see if the list has changed since the user 2766 * last requested a list member. If so, tell them that the 2767 * list has changed, and therefore they need to start over 2768 * from the beginning. 
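	 * The usual pattern, roughly: the caller starts with cgdl->index set
	 * to 0, reissues the XPT_GDEVLIST ccb with the index and generation
	 * returned below copied back in, and keeps going until the status
	 * comes back CAM_GDEVLIST_LAST_DEVICE (or CAM_GDEVLIST_LIST_CHANGED
	 * forces it to restart from index 0).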
2769 */ 2770 if ((cgdl->index != 0) && 2771 (cgdl->generation != device->generation)) { 2772 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2773 break; 2774 } 2775 2776 /* 2777 * Traverse the list of peripherals and attempt to find 2778 * the requested peripheral. 2779 */ 2780 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2781 (nperiph != NULL) && (i <= cgdl->index); 2782 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2783 if (i == cgdl->index) { 2784 strncpy(cgdl->periph_name, 2785 nperiph->periph_name, 2786 DEV_IDLEN); 2787 cgdl->unit_number = nperiph->unit_number; 2788 found = 1; 2789 } 2790 } 2791 if (found == 0) { 2792 cgdl->status = CAM_GDEVLIST_ERROR; 2793 break; 2794 } 2795 2796 if (nperiph == NULL) 2797 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2798 else 2799 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2800 2801 cgdl->index++; 2802 cgdl->generation = device->generation; 2803 2804 cgdl->ccb_h.status = CAM_REQ_CMP; 2805 break; 2806 } 2807 case XPT_DEV_MATCH: 2808 { 2809 dev_pos_type position_type; 2810 struct ccb_dev_match *cdm; 2811 2812 cdm = &start_ccb->cdm; 2813 2814 /* 2815 * There are two ways of getting at information in the EDT. 2816 * The first way is via the primary EDT tree. It starts 2817 * with a list of buses, then a list of targets on a bus, 2818 * then devices/luns on a target, and then peripherals on a 2819 * device/lun. The "other" way is by the peripheral driver 2820 * lists. The peripheral driver lists are organized by 2821 * peripheral driver. (obviously) So it makes sense to 2822 * use the peripheral driver list if the user is looking 2823 * for something like "da1", or all "da" devices. If the 2824 * user is looking for something on a particular bus/target 2825 * or lun, it's generally better to go through the EDT tree. 2826 */ 2827 2828 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2829 position_type = cdm->pos.position_type; 2830 else { 2831 u_int i; 2832 2833 position_type = CAM_DEV_POS_NONE; 2834 2835 for (i = 0; i < cdm->num_patterns; i++) { 2836 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2837 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2838 position_type = CAM_DEV_POS_EDT; 2839 break; 2840 } 2841 } 2842 2843 if (cdm->num_patterns == 0) 2844 position_type = CAM_DEV_POS_EDT; 2845 else if (position_type == CAM_DEV_POS_NONE) 2846 position_type = CAM_DEV_POS_PDRV; 2847 } 2848 2849 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2850 case CAM_DEV_POS_EDT: 2851 xptedtmatch(cdm); 2852 break; 2853 case CAM_DEV_POS_PDRV: 2854 xptperiphlistmatch(cdm); 2855 break; 2856 default: 2857 cdm->status = CAM_DEV_MATCH_ERROR; 2858 break; 2859 } 2860 2861 if (cdm->status == CAM_DEV_MATCH_ERROR) 2862 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2863 else 2864 start_ccb->ccb_h.status = CAM_REQ_CMP; 2865 2866 break; 2867 } 2868 case XPT_SASYNC_CB: 2869 { 2870 struct ccb_setasync *csa; 2871 struct async_node *cur_entry; 2872 struct async_list *async_head; 2873 u_int32_t added; 2874 2875 csa = &start_ccb->csa; 2876 added = csa->event_enable; 2877 async_head = &path->device->asyncs; 2878 2879 /* 2880 * If there is already an entry for us, simply 2881 * update it. 2882 */ 2883 cur_entry = SLIST_FIRST(async_head); 2884 while (cur_entry != NULL) { 2885 if ((cur_entry->callback_arg == csa->callback_arg) 2886 && (cur_entry->callback == csa->callback)) 2887 break; 2888 cur_entry = SLIST_NEXT(cur_entry, links); 2889 } 2890 2891 if (cur_entry != NULL) { 2892 /* 2893 * If the request has no flags set, 2894 * remove the entry. 
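	 * In practice this is also how async callbacks are torn down: issuing
	 * XPT_SASYNC_CB again with event_enable == 0 (for instance through
	 * xpt_register_async(0, callback, arg, path)) drops the entry and
	 * releases the device reference taken when it was first inserted.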
2895 */ 2896 added &= ~cur_entry->event_enable; 2897 if (csa->event_enable == 0) { 2898 SLIST_REMOVE(async_head, cur_entry, 2899 async_node, links); 2900 xpt_release_device(path->device); 2901 free(cur_entry, M_CAMXPT); 2902 } else { 2903 cur_entry->event_enable = csa->event_enable; 2904 } 2905 csa->event_enable = added; 2906 } else { 2907 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2908 M_NOWAIT); 2909 if (cur_entry == NULL) { 2910 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2911 break; 2912 } 2913 cur_entry->event_enable = csa->event_enable; 2914 cur_entry->event_lock = 2915 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2916 cur_entry->callback_arg = csa->callback_arg; 2917 cur_entry->callback = csa->callback; 2918 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2919 xpt_acquire_device(path->device); 2920 } 2921 start_ccb->ccb_h.status = CAM_REQ_CMP; 2922 break; 2923 } 2924 case XPT_REL_SIMQ: 2925 { 2926 struct ccb_relsim *crs; 2927 struct cam_ed *dev; 2928 2929 crs = &start_ccb->crs; 2930 dev = path->device; 2931 if (dev == NULL) { 2932 2933 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2934 break; 2935 } 2936 2937 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2938 2939 /* Don't ever go below one opening */ 2940 if (crs->openings > 0) { 2941 xpt_dev_ccbq_resize(path, crs->openings); 2942 if (bootverbose) { 2943 xpt_print(path, 2944 "number of openings is now %d\n", 2945 crs->openings); 2946 } 2947 } 2948 } 2949 2950 mtx_lock(&dev->sim->devq->send_mtx); 2951 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2952 2953 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2954 2955 /* 2956 * Just extend the old timeout and decrement 2957 * the freeze count so that a single timeout 2958 * is sufficient for releasing the queue. 2959 */ 2960 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2961 callout_stop(&dev->callout); 2962 } else { 2963 2964 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2965 } 2966 2967 callout_reset_sbt(&dev->callout, 2968 SBT_1MS * crs->release_timeout, 0, 2969 xpt_release_devq_timeout, dev, 0); 2970 2971 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2972 2973 } 2974 2975 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2976 2977 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2978 /* 2979 * Decrement the freeze count so that a single 2980 * completion is still sufficient to unfreeze 2981 * the queue. 2982 */ 2983 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2984 } else { 2985 2986 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2987 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2988 } 2989 } 2990 2991 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2992 2993 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 2994 || (dev->ccbq.dev_active == 0)) { 2995 2996 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2997 } else { 2998 2999 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3000 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3001 } 3002 } 3003 mtx_unlock(&dev->sim->devq->send_mtx); 3004 3005 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 3006 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 3007 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 3008 start_ccb->ccb_h.status = CAM_REQ_CMP; 3009 break; 3010 } 3011 case XPT_DEBUG: { 3012 struct cam_path *oldpath; 3013 3014 /* Check that all request bits are supported. 
*/ 3015 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3016 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3017 break; 3018 } 3019 3020 cam_dflags = CAM_DEBUG_NONE; 3021 if (cam_dpath != NULL) { 3022 oldpath = cam_dpath; 3023 cam_dpath = NULL; 3024 xpt_free_path(oldpath); 3025 } 3026 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3027 if (xpt_create_path(&cam_dpath, NULL, 3028 start_ccb->ccb_h.path_id, 3029 start_ccb->ccb_h.target_id, 3030 start_ccb->ccb_h.target_lun) != 3031 CAM_REQ_CMP) { 3032 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3033 } else { 3034 cam_dflags = start_ccb->cdbg.flags; 3035 start_ccb->ccb_h.status = CAM_REQ_CMP; 3036 xpt_print(cam_dpath, "debugging flags now %x\n", 3037 cam_dflags); 3038 } 3039 } else 3040 start_ccb->ccb_h.status = CAM_REQ_CMP; 3041 break; 3042 } 3043 case XPT_NOOP: 3044 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3045 xpt_freeze_devq(path, 1); 3046 start_ccb->ccb_h.status = CAM_REQ_CMP; 3047 break; 3048 case XPT_REPROBE_LUN: 3049 xpt_async(AC_INQ_CHANGED, path, NULL); 3050 start_ccb->ccb_h.status = CAM_REQ_CMP; 3051 xpt_done(start_ccb); 3052 break; 3053 default: 3054 case XPT_SDEV_TYPE: 3055 case XPT_TERM_IO: 3056 case XPT_ENG_INQ: 3057 /* XXX Implement */ 3058 xpt_print(start_ccb->ccb_h.path, 3059 "%s: CCB type %#x %s not supported\n", __func__, 3060 start_ccb->ccb_h.func_code, 3061 xpt_action_name(start_ccb->ccb_h.func_code)); 3062 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3063 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3064 xpt_done(start_ccb); 3065 } 3066 break; 3067 } 3068 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3069 ("xpt_action_default: func= %#x %s status %#x\n", 3070 start_ccb->ccb_h.func_code, 3071 xpt_action_name(start_ccb->ccb_h.func_code), 3072 start_ccb->ccb_h.status)); 3073 } 3074 3075 void 3076 xpt_polled_action(union ccb *start_ccb) 3077 { 3078 u_int32_t timeout; 3079 struct cam_sim *sim; 3080 struct cam_devq *devq; 3081 struct cam_ed *dev; 3082 3083 timeout = start_ccb->ccb_h.timeout * 10; 3084 sim = start_ccb->ccb_h.path->bus->sim; 3085 devq = sim->devq; 3086 dev = start_ccb->ccb_h.path->device; 3087 3088 mtx_unlock(&dev->device_mtx); 3089 3090 /* 3091 * Steal an opening so that no other queued requests 3092 * can get it before us while we simulate interrupts. 3093 */ 3094 mtx_lock(&devq->send_mtx); 3095 dev->ccbq.dev_openings--; 3096 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3097 (--timeout > 0)) { 3098 mtx_unlock(&devq->send_mtx); 3099 DELAY(100); 3100 CAM_SIM_LOCK(sim); 3101 (*(sim->sim_poll))(sim); 3102 CAM_SIM_UNLOCK(sim); 3103 camisr_runqueue(); 3104 mtx_lock(&devq->send_mtx); 3105 } 3106 dev->ccbq.dev_openings++; 3107 mtx_unlock(&devq->send_mtx); 3108 3109 if (timeout != 0) { 3110 xpt_action(start_ccb); 3111 while(--timeout > 0) { 3112 CAM_SIM_LOCK(sim); 3113 (*(sim->sim_poll))(sim); 3114 CAM_SIM_UNLOCK(sim); 3115 camisr_runqueue(); 3116 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3117 != CAM_REQ_INPROG) 3118 break; 3119 DELAY(100); 3120 } 3121 if (timeout == 0) { 3122 /* 3123 * XXX Is it worth adding a sim_timeout entry 3124 * point so we can attempt recovery? If 3125 * this is only used for dumps, I don't think 3126 * it is. 3127 */ 3128 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3129 } 3130 } else { 3131 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3132 } 3133 3134 mtx_lock(&dev->device_mtx); 3135 } 3136 3137 /* 3138 * Schedule a peripheral driver to receive a ccb when its 3139 * target device has space for more transactions. 
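	 * Typical use from a peripheral driver, as a sketch: with the periph
	 * lock held (for example in its strategy or state machine code) it
	 * calls
	 *
	 *	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	 *
	 * and is later handed a CCB through its periph_start() routine once
	 * xpt_run_allocq() below can allocate one at that priority.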
3140 */ 3141 void 3142 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3143 { 3144 3145 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3146 cam_periph_assert(periph, MA_OWNED); 3147 if (new_priority < periph->scheduled_priority) { 3148 periph->scheduled_priority = new_priority; 3149 xpt_run_allocq(periph, 0); 3150 } 3151 } 3152 3153 3154 /* 3155 * Schedule a device to run on a given queue. 3156 * If the device was inserted as a new entry on the queue, 3157 * return 1 meaning the device queue should be run. If we 3158 * were already queued, implying someone else has already 3159 * started the queue, return 0 so the caller doesn't attempt 3160 * to run the queue. 3161 */ 3162 static int 3163 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3164 u_int32_t new_priority) 3165 { 3166 int retval; 3167 u_int32_t old_priority; 3168 3169 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3170 3171 old_priority = pinfo->priority; 3172 3173 /* 3174 * Are we already queued? 3175 */ 3176 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3177 /* Simply reorder based on new priority */ 3178 if (new_priority < old_priority) { 3179 camq_change_priority(queue, pinfo->index, 3180 new_priority); 3181 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3182 ("changed priority to %d\n", 3183 new_priority)); 3184 retval = 1; 3185 } else 3186 retval = 0; 3187 } else { 3188 /* New entry on the queue */ 3189 if (new_priority < old_priority) 3190 pinfo->priority = new_priority; 3191 3192 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3193 ("Inserting onto queue\n")); 3194 pinfo->generation = ++queue->generation; 3195 camq_insert(queue, pinfo); 3196 retval = 1; 3197 } 3198 return (retval); 3199 } 3200 3201 static void 3202 xpt_run_allocq_task(void *context, int pending) 3203 { 3204 struct cam_periph *periph = context; 3205 3206 cam_periph_lock(periph); 3207 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3208 xpt_run_allocq(periph, 1); 3209 cam_periph_unlock(periph); 3210 cam_periph_release(periph); 3211 } 3212 3213 static void 3214 xpt_run_allocq(struct cam_periph *periph, int sleep) 3215 { 3216 struct cam_ed *device; 3217 union ccb *ccb; 3218 uint32_t prio; 3219 3220 cam_periph_assert(periph, MA_OWNED); 3221 if (periph->periph_allocating) 3222 return; 3223 periph->periph_allocating = 1; 3224 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3225 device = periph->path->device; 3226 ccb = NULL; 3227 restart: 3228 while ((prio = min(periph->scheduled_priority, 3229 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3230 (periph->periph_allocated - (ccb != NULL ? 
1 : 0) < 3231 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3232 3233 if (ccb == NULL && 3234 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3235 if (sleep) { 3236 ccb = xpt_get_ccb(periph); 3237 goto restart; 3238 } 3239 if (periph->flags & CAM_PERIPH_RUN_TASK) 3240 break; 3241 cam_periph_doacquire(periph); 3242 periph->flags |= CAM_PERIPH_RUN_TASK; 3243 taskqueue_enqueue(xsoftc.xpt_taskq, 3244 &periph->periph_run_task); 3245 break; 3246 } 3247 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3248 if (prio == periph->immediate_priority) { 3249 periph->immediate_priority = CAM_PRIORITY_NONE; 3250 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3251 ("waking cam_periph_getccb()\n")); 3252 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3253 periph_links.sle); 3254 wakeup(&periph->ccb_list); 3255 } else { 3256 periph->scheduled_priority = CAM_PRIORITY_NONE; 3257 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3258 ("calling periph_start()\n")); 3259 periph->periph_start(periph, ccb); 3260 } 3261 ccb = NULL; 3262 } 3263 if (ccb != NULL) 3264 xpt_release_ccb(ccb); 3265 periph->periph_allocating = 0; 3266 } 3267 3268 static void 3269 xpt_run_devq(struct cam_devq *devq) 3270 { 3271 int lock; 3272 3273 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3274 3275 devq->send_queue.qfrozen_cnt++; 3276 while ((devq->send_queue.entries > 0) 3277 && (devq->send_openings > 0) 3278 && (devq->send_queue.qfrozen_cnt <= 1)) { 3279 struct cam_ed *device; 3280 union ccb *work_ccb; 3281 struct cam_sim *sim; 3282 struct xpt_proto *proto; 3283 3284 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3285 CAMQ_HEAD); 3286 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3287 ("running device %p\n", device)); 3288 3289 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3290 if (work_ccb == NULL) { 3291 printf("device on run queue with no ccbs???\n"); 3292 continue; 3293 } 3294 3295 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3296 3297 mtx_lock(&xsoftc.xpt_highpower_lock); 3298 if (xsoftc.num_highpower <= 0) { 3299 /* 3300 * We got a high power command, but we 3301 * don't have any available slots. Freeze 3302 * the device queue until we have a slot 3303 * available. 3304 */ 3305 xpt_freeze_devq_device(device, 1); 3306 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3307 highpowerq_entry); 3308 3309 mtx_unlock(&xsoftc.xpt_highpower_lock); 3310 continue; 3311 } else { 3312 /* 3313 * Consume a high power slot while 3314 * this ccb runs. 3315 */ 3316 xsoftc.num_highpower--; 3317 } 3318 mtx_unlock(&xsoftc.xpt_highpower_lock); 3319 } 3320 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3321 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3322 devq->send_openings--; 3323 devq->send_active++; 3324 xpt_schedule_devq(devq, device); 3325 mtx_unlock(&devq->send_mtx); 3326 3327 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3328 /* 3329 * The client wants to freeze the queue 3330 * after this CCB is sent. 3331 */ 3332 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3333 } 3334 3335 /* In Target mode, the peripheral driver knows best... */ 3336 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3337 if ((device->inq_flags & SID_CmdQue) != 0 3338 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3339 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3340 else 3341 /* 3342 * Clear this in case of a retried CCB that 3343 * failed due to a rejected tag. 
3344 */ 3345 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3346 } 3347 3348 KASSERT(device == work_ccb->ccb_h.path->device, 3349 ("device (%p) / path->device (%p) mismatch", 3350 device, work_ccb->ccb_h.path->device)); 3351 proto = xpt_proto_find(device->protocol); 3352 if (proto && proto->ops->debug_out) 3353 proto->ops->debug_out(work_ccb); 3354 3355 /* 3356 * Device queues can be shared among multiple SIM instances 3357 * that reside on different buses. Use the SIM from the 3358 * queued device, rather than the one from the calling bus. 3359 */ 3360 sim = device->sim; 3361 lock = (mtx_owned(sim->mtx) == 0); 3362 if (lock) 3363 CAM_SIM_LOCK(sim); 3364 work_ccb->ccb_h.qos.sim_data = sbinuptime(); /* XXX: uintptr_t may be too small on 32-bit platforms */ 3365 (*(sim->sim_action))(sim, work_ccb); 3366 if (lock) 3367 CAM_SIM_UNLOCK(sim); 3368 mtx_lock(&devq->send_mtx); 3369 } 3370 devq->send_queue.qfrozen_cnt--; 3371 } 3372 3373 /* 3374 * This function merges the peripheral-settable fields and payload from the 3375 * slave ccb into the master ccb, while keeping important header fields in the master ccb constant. 3376 */ 3377 void 3378 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3379 { 3380 3381 /* 3382 * Pull fields that are valid for peripheral drivers to set 3383 * into the master CCB along with the CCB "payload". 3384 */ 3385 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3386 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3387 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3388 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3389 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3390 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3391 } 3392 3393 void 3394 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3395 u_int32_t priority, u_int32_t flags) 3396 { 3397 3398 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3399 ccb_h->pinfo.priority = priority; 3400 ccb_h->path = path; 3401 ccb_h->path_id = path->bus->path_id; 3402 if (path->target) 3403 ccb_h->target_id = path->target->target_id; 3404 else 3405 ccb_h->target_id = CAM_TARGET_WILDCARD; 3406 if (path->device) { 3407 ccb_h->target_lun = path->device->lun_id; 3408 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3409 } else { 3410 ccb_h->target_lun = CAM_LUN_WILDCARD; 3411 } 3412 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3413 ccb_h->flags = flags; 3414 ccb_h->xflags = 0; 3415 } 3416 3417 void 3418 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3419 { 3420 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3421 } 3422 3423 /* Path manipulation functions */ 3424 cam_status 3425 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3426 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3427 { 3428 struct cam_path *path; 3429 cam_status status; 3430 3431 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3432 3433 if (path == NULL) { 3434 status = CAM_RESRC_UNAVAIL; 3435 return(status); 3436 } 3437 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3438 if (status != CAM_REQ_CMP) { 3439 free(path, M_CAMPATH); 3440 path = NULL; 3441 } 3442 *new_path_ptr = path; 3443 return (status); 3444 } 3445 3446 cam_status 3447 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3448 struct cam_periph *periph, path_id_t path_id, 3449 target_id_t target_id, lun_id_t lun_id) 3450 { 3451 3452 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3453 lun_id)); 3454 } 3455 3456 cam_status 3457
xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3458 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3459 { 3460 struct cam_eb *bus; 3461 struct cam_et *target; 3462 struct cam_ed *device; 3463 cam_status status; 3464 3465 status = CAM_REQ_CMP; /* Completed without error */ 3466 target = NULL; /* Wildcarded */ 3467 device = NULL; /* Wildcarded */ 3468 3469 /* 3470 * We will potentially modify the EDT, so block interrupts 3471 * that may attempt to create cam paths. 3472 */ 3473 bus = xpt_find_bus(path_id); 3474 if (bus == NULL) { 3475 status = CAM_PATH_INVALID; 3476 } else { 3477 xpt_lock_buses(); 3478 mtx_lock(&bus->eb_mtx); 3479 target = xpt_find_target(bus, target_id); 3480 if (target == NULL) { 3481 /* Create one */ 3482 struct cam_et *new_target; 3483 3484 new_target = xpt_alloc_target(bus, target_id); 3485 if (new_target == NULL) { 3486 status = CAM_RESRC_UNAVAIL; 3487 } else { 3488 target = new_target; 3489 } 3490 } 3491 xpt_unlock_buses(); 3492 if (target != NULL) { 3493 device = xpt_find_device(target, lun_id); 3494 if (device == NULL) { 3495 /* Create one */ 3496 struct cam_ed *new_device; 3497 3498 new_device = 3499 (*(bus->xport->ops->alloc_device))(bus, 3500 target, 3501 lun_id); 3502 if (new_device == NULL) { 3503 status = CAM_RESRC_UNAVAIL; 3504 } else { 3505 device = new_device; 3506 } 3507 } 3508 } 3509 mtx_unlock(&bus->eb_mtx); 3510 } 3511 3512 /* 3513 * Only touch the user's data if we are successful. 3514 */ 3515 if (status == CAM_REQ_CMP) { 3516 new_path->periph = perph; 3517 new_path->bus = bus; 3518 new_path->target = target; 3519 new_path->device = device; 3520 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3521 } else { 3522 if (device != NULL) 3523 xpt_release_device(device); 3524 if (target != NULL) 3525 xpt_release_target(target); 3526 if (bus != NULL) 3527 xpt_release_bus(bus); 3528 } 3529 return (status); 3530 } 3531 3532 cam_status 3533 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3534 { 3535 struct cam_path *new_path; 3536 3537 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3538 if (new_path == NULL) 3539 return(CAM_RESRC_UNAVAIL); 3540 xpt_copy_path(new_path, path); 3541 *new_path_ptr = new_path; 3542 return (CAM_REQ_CMP); 3543 } 3544 3545 void 3546 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3547 { 3548 3549 *new_path = *path; 3550 if (path->bus != NULL) 3551 xpt_acquire_bus(path->bus); 3552 if (path->target != NULL) 3553 xpt_acquire_target(path->target); 3554 if (path->device != NULL) 3555 xpt_acquire_device(path->device); 3556 } 3557 3558 void 3559 xpt_release_path(struct cam_path *path) 3560 { 3561 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3562 if (path->device != NULL) { 3563 xpt_release_device(path->device); 3564 path->device = NULL; 3565 } 3566 if (path->target != NULL) { 3567 xpt_release_target(path->target); 3568 path->target = NULL; 3569 } 3570 if (path->bus != NULL) { 3571 xpt_release_bus(path->bus); 3572 path->bus = NULL; 3573 } 3574 } 3575 3576 void 3577 xpt_free_path(struct cam_path *path) 3578 { 3579 3580 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3581 xpt_release_path(path); 3582 free(path, M_CAMPATH); 3583 } 3584 3585 void 3586 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3587 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3588 { 3589 3590 xpt_lock_buses(); 3591 if (bus_ref) { 3592 if (path->bus) 3593 *bus_ref = path->bus->refcount; 3594 else 3595 *bus_ref = 0; 
3596 } 3597 if (periph_ref) { 3598 if (path->periph) 3599 *periph_ref = path->periph->refcount; 3600 else 3601 *periph_ref = 0; 3602 } 3603 xpt_unlock_buses(); 3604 if (target_ref) { 3605 if (path->target) 3606 *target_ref = path->target->refcount; 3607 else 3608 *target_ref = 0; 3609 } 3610 if (device_ref) { 3611 if (path->device) 3612 *device_ref = path->device->refcount; 3613 else 3614 *device_ref = 0; 3615 } 3616 } 3617 3618 /* 3619 * Return -1 for failure, 0 for exact match, 1 for match with wildcards 3620 * in path1, 2 for match with wildcards in path2. 3621 */ 3622 int 3623 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3624 { 3625 int retval = 0; 3626 3627 if (path1->bus != path2->bus) { 3628 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3629 retval = 1; 3630 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3631 retval = 2; 3632 else 3633 return (-1); 3634 } 3635 if (path1->target != path2->target) { 3636 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3637 if (retval == 0) 3638 retval = 1; 3639 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3640 retval = 2; 3641 else 3642 return (-1); 3643 } 3644 if (path1->device != path2->device) { 3645 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3646 if (retval == 0) 3647 retval = 1; 3648 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3649 retval = 2; 3650 else 3651 return (-1); 3652 } 3653 return (retval); 3654 } 3655 3656 int 3657 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3658 { 3659 int retval = 0; 3660 3661 if (path->bus != dev->target->bus) { 3662 if (path->bus->path_id == CAM_BUS_WILDCARD) 3663 retval = 1; 3664 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3665 retval = 2; 3666 else 3667 return (-1); 3668 } 3669 if (path->target != dev->target) { 3670 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3671 if (retval == 0) 3672 retval = 1; 3673 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3674 retval = 2; 3675 else 3676 return (-1); 3677 } 3678 if (path->device != dev) { 3679 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3680 if (retval == 0) 3681 retval = 1; 3682 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3683 retval = 2; 3684 else 3685 return (-1); 3686 } 3687 return (retval); 3688 } 3689 3690 void 3691 xpt_print_path(struct cam_path *path) 3692 { 3693 struct sbuf sb; 3694 char buffer[XPT_PRINT_LEN]; 3695 3696 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3697 xpt_path_sbuf(path, &sb); 3698 sbuf_finish(&sb); 3699 printf("%s", sbuf_data(&sb)); 3700 sbuf_delete(&sb); 3701 } 3702 3703 void 3704 xpt_print_device(struct cam_ed *device) 3705 { 3706 3707 if (device == NULL) 3708 printf("(nopath): "); 3709 else { 3710 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3711 device->sim->unit_number, 3712 device->sim->bus_id, 3713 device->target->target_id, 3714 (uintmax_t)device->lun_id); 3715 } 3716 } 3717 3718 void 3719 xpt_print(struct cam_path *path, const char *fmt, ...) 
3720 { 3721 va_list ap; 3722 struct sbuf sb; 3723 char buffer[XPT_PRINT_LEN]; 3724 3725 sbuf_new(&sb, buffer, XPT_PRINT_LEN, SBUF_FIXEDLEN); 3726 3727 xpt_path_sbuf(path, &sb); 3728 va_start(ap, fmt); 3729 sbuf_vprintf(&sb, fmt, ap); 3730 va_end(ap); 3731 3732 sbuf_finish(&sb); 3733 printf("%s", sbuf_data(&sb)); 3734 sbuf_delete(&sb); 3735 } 3736 3737 int 3738 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3739 { 3740 struct sbuf sb; 3741 int len; 3742 3743 sbuf_new(&sb, str, str_len, 0); 3744 len = xpt_path_sbuf(path, &sb); 3745 sbuf_finish(&sb); 3746 return (len); 3747 } 3748 3749 int 3750 xpt_path_sbuf(struct cam_path *path, struct sbuf *sb) 3751 { 3752 3753 if (path == NULL) 3754 sbuf_printf(sb, "(nopath): "); 3755 else { 3756 if (path->periph != NULL) 3757 sbuf_printf(sb, "(%s%d:", path->periph->periph_name, 3758 path->periph->unit_number); 3759 else 3760 sbuf_printf(sb, "(noperiph:"); 3761 3762 if (path->bus != NULL) 3763 sbuf_printf(sb, "%s%d:%d:", path->bus->sim->sim_name, 3764 path->bus->sim->unit_number, 3765 path->bus->sim->bus_id); 3766 else 3767 sbuf_printf(sb, "nobus:"); 3768 3769 if (path->target != NULL) 3770 sbuf_printf(sb, "%d:", path->target->target_id); 3771 else 3772 sbuf_printf(sb, "X:"); 3773 3774 if (path->device != NULL) 3775 sbuf_printf(sb, "%jx): ", 3776 (uintmax_t)path->device->lun_id); 3777 else 3778 sbuf_printf(sb, "X): "); 3779 } 3780 3781 return(sbuf_len(sb)); 3782 } 3783 3784 path_id_t 3785 xpt_path_path_id(struct cam_path *path) 3786 { 3787 return(path->bus->path_id); 3788 } 3789 3790 target_id_t 3791 xpt_path_target_id(struct cam_path *path) 3792 { 3793 if (path->target != NULL) 3794 return (path->target->target_id); 3795 else 3796 return (CAM_TARGET_WILDCARD); 3797 } 3798 3799 lun_id_t 3800 xpt_path_lun_id(struct cam_path *path) 3801 { 3802 if (path->device != NULL) 3803 return (path->device->lun_id); 3804 else 3805 return (CAM_LUN_WILDCARD); 3806 } 3807 3808 struct cam_sim * 3809 xpt_path_sim(struct cam_path *path) 3810 { 3811 3812 return (path->bus->sim); 3813 } 3814 3815 struct cam_periph* 3816 xpt_path_periph(struct cam_path *path) 3817 { 3818 3819 return (path->periph); 3820 } 3821 3822 /* 3823 * Release a CAM control block for the caller. Remit the cost of the structure 3824 * to the device referenced by the path. If the this device had no 'credits' 3825 * and peripheral drivers have registered async callbacks for this notification 3826 * call them now. 3827 */ 3828 void 3829 xpt_release_ccb(union ccb *free_ccb) 3830 { 3831 struct cam_ed *device; 3832 struct cam_periph *periph; 3833 3834 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3835 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3836 device = free_ccb->ccb_h.path->device; 3837 periph = free_ccb->ccb_h.path->periph; 3838 3839 xpt_free_ccb(free_ccb); 3840 periph->periph_allocated--; 3841 cam_ccbq_release_opening(&device->ccbq); 3842 xpt_run_allocq(periph, 0); 3843 } 3844 3845 /* Functions accessed by SIM drivers */ 3846 3847 static struct xpt_xport_ops xport_default_ops = { 3848 .alloc_device = xpt_alloc_device_default, 3849 .action = xpt_action_default, 3850 .async = xpt_dev_async_default, 3851 }; 3852 static struct xpt_xport xport_default = { 3853 .xport = XPORT_UNKNOWN, 3854 .name = "unknown", 3855 .ops = &xport_default_ops, 3856 }; 3857 3858 CAM_XPT_XPORT(xport_default); 3859 3860 /* 3861 * A sim structure, listing the SIM entry points and instance 3862 * identification info is passed to xpt_bus_register to hook the SIM 3863 * into the CAM framework. 
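 * As a sketch of the usual call sequence from a SIM driver's attach
 * routine (the sizes and the "drv"/softc names here are illustrative
 * placeholders, not part of this file):
 *
 *	devq = cam_simq_alloc(max_queued_reqs);
 *	sim = cam_sim_alloc(drv_action, drv_poll, "drv", softc,
 *	    device_get_unit(dev), &softc->mtx, 1, max_tagged, devq);
 *	mtx_lock(&softc->mtx);
 *	if (xpt_bus_register(sim, dev, 0) != CAM_SUCCESS)
 *		... tear the sim back down ...
 *	mtx_unlock(&softc->mtx);
 *
 * The sim lock must be held across xpt_bus_register(), as asserted
 * below.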
xpt_bus_register creates a cam_eb entry 3864 * for this new bus and places it in the array of buses and assigns 3865 * it a path_id. The path_id may be influenced by "hard wiring" 3866 * information specified by the user. Once interrupt services are 3867 * available, the bus will be probed. 3868 */ 3869 int32_t 3870 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3871 { 3872 struct cam_eb *new_bus; 3873 struct cam_eb *old_bus; 3874 struct ccb_pathinq cpi; 3875 struct cam_path *path; 3876 cam_status status; 3877 3878 mtx_assert(sim->mtx, MA_OWNED); 3879 3880 sim->bus_id = bus; 3881 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3882 M_CAMXPT, M_NOWAIT|M_ZERO); 3883 if (new_bus == NULL) { 3884 /* Couldn't satisfy request */ 3885 return (CAM_RESRC_UNAVAIL); 3886 } 3887 3888 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3889 TAILQ_INIT(&new_bus->et_entries); 3890 cam_sim_hold(sim); 3891 new_bus->sim = sim; 3892 timevalclear(&new_bus->last_reset); 3893 new_bus->flags = 0; 3894 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3895 new_bus->generation = 0; 3896 3897 xpt_lock_buses(); 3898 sim->path_id = new_bus->path_id = 3899 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3900 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3901 while (old_bus != NULL 3902 && old_bus->path_id < new_bus->path_id) 3903 old_bus = TAILQ_NEXT(old_bus, links); 3904 if (old_bus != NULL) 3905 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3906 else 3907 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3908 xsoftc.bus_generation++; 3909 xpt_unlock_buses(); 3910 3911 /* 3912 * Set a default transport so that a PATH_INQ can be issued to 3913 * the SIM. This will then allow for probing and attaching of 3914 * a more appropriate transport. 3915 */ 3916 new_bus->xport = &xport_default; 3917 3918 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3919 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3920 if (status != CAM_REQ_CMP) { 3921 xpt_release_bus(new_bus); 3922 free(path, M_CAMXPT); 3923 return (CAM_RESRC_UNAVAIL); 3924 } 3925 3926 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3927 cpi.ccb_h.func_code = XPT_PATH_INQ; 3928 xpt_action((union ccb *)&cpi); 3929 3930 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3931 struct xpt_xport **xpt; 3932 3933 SET_FOREACH(xpt, cam_xpt_xport_set) { 3934 if ((*xpt)->xport == cpi.transport) { 3935 new_bus->xport = *xpt; 3936 break; 3937 } 3938 } 3939 if (new_bus->xport == NULL) { 3940 xpt_print(path, 3941 "No transport found for %d\n", cpi.transport); 3942 xpt_release_bus(new_bus); 3943 free(path, M_CAMXPT); 3944 return (CAM_RESRC_UNAVAIL); 3945 } 3946 } 3947 3948 /* Notify interested parties */ 3949 if (sim->path_id != CAM_XPT_PATH_ID) { 3950 3951 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3952 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3953 union ccb *scan_ccb; 3954 3955 /* Initiate bus rescan. 
*/ 3956 scan_ccb = xpt_alloc_ccb_nowait(); 3957 if (scan_ccb != NULL) { 3958 scan_ccb->ccb_h.path = path; 3959 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3960 scan_ccb->crcn.flags = 0; 3961 xpt_rescan(scan_ccb); 3962 } else { 3963 xpt_print(path, 3964 "Can't allocate CCB to scan bus\n"); 3965 xpt_free_path(path); 3966 } 3967 } else 3968 xpt_free_path(path); 3969 } else 3970 xpt_free_path(path); 3971 return (CAM_SUCCESS); 3972 } 3973 3974 int32_t 3975 xpt_bus_deregister(path_id_t pathid) 3976 { 3977 struct cam_path bus_path; 3978 cam_status status; 3979 3980 status = xpt_compile_path(&bus_path, NULL, pathid, 3981 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3982 if (status != CAM_REQ_CMP) 3983 return (status); 3984 3985 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3986 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3987 3988 /* Release the reference count held while registered. */ 3989 xpt_release_bus(bus_path.bus); 3990 xpt_release_path(&bus_path); 3991 3992 return (CAM_REQ_CMP); 3993 } 3994 3995 static path_id_t 3996 xptnextfreepathid(void) 3997 { 3998 struct cam_eb *bus; 3999 path_id_t pathid; 4000 const char *strval; 4001 4002 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4003 pathid = 0; 4004 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4005 retry: 4006 /* Find an unoccupied pathid */ 4007 while (bus != NULL && bus->path_id <= pathid) { 4008 if (bus->path_id == pathid) 4009 pathid++; 4010 bus = TAILQ_NEXT(bus, links); 4011 } 4012 4013 /* 4014 * Ensure that this pathid is not reserved for 4015 * a bus that may be registered in the future. 4016 */ 4017 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4018 ++pathid; 4019 /* Start the search over */ 4020 goto retry; 4021 } 4022 return (pathid); 4023 } 4024 4025 static path_id_t 4026 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4027 { 4028 path_id_t pathid; 4029 int i, dunit, val; 4030 char buf[32]; 4031 const char *dname; 4032 4033 pathid = CAM_XPT_PATH_ID; 4034 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4035 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4036 return (pathid); 4037 i = 0; 4038 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4039 if (strcmp(dname, "scbus")) { 4040 /* Avoid a bit of foot shooting. */ 4041 continue; 4042 } 4043 if (dunit < 0) /* unwired?! */ 4044 continue; 4045 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4046 if (sim_bus == val) { 4047 pathid = dunit; 4048 break; 4049 } 4050 } else if (sim_bus == 0) { 4051 /* Unspecified matches bus 0 */ 4052 pathid = dunit; 4053 break; 4054 } else { 4055 printf("Ambiguous scbus configuration for %s%d " 4056 "bus %d, cannot wire down. 
The kernel " 4057 "config entry for scbus%d should " 4058 "specify a controller bus.\n" 4059 "Scbus will be assigned dynamically.\n", 4060 sim_name, sim_unit, sim_bus, dunit); 4061 break; 4062 } 4063 } 4064 4065 if (pathid == CAM_XPT_PATH_ID) 4066 pathid = xptnextfreepathid(); 4067 return (pathid); 4068 } 4069 4070 static const char * 4071 xpt_async_string(u_int32_t async_code) 4072 { 4073 4074 switch (async_code) { 4075 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4076 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4077 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4078 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4079 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4080 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4081 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4082 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4083 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4084 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4085 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4086 case AC_CONTRACT: return ("AC_CONTRACT"); 4087 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4088 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4089 } 4090 return ("AC_UNKNOWN"); 4091 } 4092 4093 static int 4094 xpt_async_size(u_int32_t async_code) 4095 { 4096 4097 switch (async_code) { 4098 case AC_BUS_RESET: return (0); 4099 case AC_UNSOL_RESEL: return (0); 4100 case AC_SCSI_AEN: return (0); 4101 case AC_SENT_BDR: return (0); 4102 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4103 case AC_PATH_DEREGISTERED: return (0); 4104 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4105 case AC_LOST_DEVICE: return (0); 4106 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4107 case AC_INQ_CHANGED: return (0); 4108 case AC_GETDEV_CHANGED: return (0); 4109 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4110 case AC_ADVINFO_CHANGED: return (-1); 4111 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4112 } 4113 return (0); 4114 } 4115 4116 static int 4117 xpt_async_process_dev(struct cam_ed *device, void *arg) 4118 { 4119 union ccb *ccb = arg; 4120 struct cam_path *path = ccb->ccb_h.path; 4121 void *async_arg = ccb->casync.async_arg_ptr; 4122 u_int32_t async_code = ccb->casync.async_code; 4123 int relock; 4124 4125 if (path->device != device 4126 && path->device->lun_id != CAM_LUN_WILDCARD 4127 && device->lun_id != CAM_LUN_WILDCARD) 4128 return (1); 4129 4130 /* 4131 * The async callback could free the device. 4132 * If it is a broadcast async, it doesn't hold 4133 * device reference, so take our own reference. 4134 */ 4135 xpt_acquire_device(device); 4136 4137 /* 4138 * If async for specific device is to be delivered to 4139 * the wildcard client, take the specific device lock. 4140 * XXX: We may need a way for client to specify it. 
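 * A "wildcard client" is a consumer whose callback was registered on a
 * path with a wildcard target and LUN; as a hedged sketch (foo_async
 * and softc are hypothetical names), such a registration looks like:
 *
 *	xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *	    foo_async, softc, NULL);
 *
 * where the NULL path makes xpt_register_async() build a fully
 * wildcarded path against the XPT bus.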
4141 */ 4142 if ((device->lun_id == CAM_LUN_WILDCARD && 4143 path->device->lun_id != CAM_LUN_WILDCARD) || 4144 (device->target->target_id == CAM_TARGET_WILDCARD && 4145 path->target->target_id != CAM_TARGET_WILDCARD) || 4146 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4147 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4148 mtx_unlock(&device->device_mtx); 4149 xpt_path_lock(path); 4150 relock = 1; 4151 } else 4152 relock = 0; 4153 4154 (*(device->target->bus->xport->ops->async))(async_code, 4155 device->target->bus, device->target, device, async_arg); 4156 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4157 4158 if (relock) { 4159 xpt_path_unlock(path); 4160 mtx_lock(&device->device_mtx); 4161 } 4162 xpt_release_device(device); 4163 return (1); 4164 } 4165 4166 static int 4167 xpt_async_process_tgt(struct cam_et *target, void *arg) 4168 { 4169 union ccb *ccb = arg; 4170 struct cam_path *path = ccb->ccb_h.path; 4171 4172 if (path->target != target 4173 && path->target->target_id != CAM_TARGET_WILDCARD 4174 && target->target_id != CAM_TARGET_WILDCARD) 4175 return (1); 4176 4177 if (ccb->casync.async_code == AC_SENT_BDR) { 4178 /* Update our notion of when the last reset occurred */ 4179 microtime(&target->last_reset); 4180 } 4181 4182 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4183 } 4184 4185 static void 4186 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4187 { 4188 struct cam_eb *bus; 4189 struct cam_path *path; 4190 void *async_arg; 4191 u_int32_t async_code; 4192 4193 path = ccb->ccb_h.path; 4194 async_code = ccb->casync.async_code; 4195 async_arg = ccb->casync.async_arg_ptr; 4196 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4197 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4198 bus = path->bus; 4199 4200 if (async_code == AC_BUS_RESET) { 4201 /* Update our notion of when the last reset occurred */ 4202 microtime(&bus->last_reset); 4203 } 4204 4205 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4206 4207 /* 4208 * If this wasn't a fully wildcarded async, tell all 4209 * clients that want all async events. 4210 */ 4211 if (bus != xpt_periph->path->bus) { 4212 xpt_path_lock(xpt_periph->path); 4213 xpt_async_process_dev(xpt_periph->path->device, ccb); 4214 xpt_path_unlock(xpt_periph->path); 4215 } 4216 4217 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4218 xpt_release_devq(path, 1, TRUE); 4219 else 4220 xpt_release_simq(path->bus->sim, TRUE); 4221 if (ccb->casync.async_arg_size > 0) 4222 free(async_arg, M_CAMXPT); 4223 xpt_free_path(path); 4224 xpt_free_ccb(ccb); 4225 } 4226 4227 static void 4228 xpt_async_bcast(struct async_list *async_head, 4229 u_int32_t async_code, 4230 struct cam_path *path, void *async_arg) 4231 { 4232 struct async_node *cur_entry; 4233 int lock; 4234 4235 cur_entry = SLIST_FIRST(async_head); 4236 while (cur_entry != NULL) { 4237 struct async_node *next_entry; 4238 /* 4239 * Grab the next list entry before we call the current 4240 * entry's callback. This is because the callback function 4241 * can delete its async callback entry. 
4242 */ 4243 next_entry = SLIST_NEXT(cur_entry, links); 4244 if ((cur_entry->event_enable & async_code) != 0) { 4245 lock = cur_entry->event_lock; 4246 if (lock) 4247 CAM_SIM_LOCK(path->device->sim); 4248 cur_entry->callback(cur_entry->callback_arg, 4249 async_code, path, 4250 async_arg); 4251 if (lock) 4252 CAM_SIM_UNLOCK(path->device->sim); 4253 } 4254 cur_entry = next_entry; 4255 } 4256 } 4257 4258 void 4259 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4260 { 4261 union ccb *ccb; 4262 int size; 4263 4264 ccb = xpt_alloc_ccb_nowait(); 4265 if (ccb == NULL) { 4266 xpt_print(path, "Can't allocate CCB to send %s\n", 4267 xpt_async_string(async_code)); 4268 return; 4269 } 4270 4271 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4272 xpt_print(path, "Can't allocate path to send %s\n", 4273 xpt_async_string(async_code)); 4274 xpt_free_ccb(ccb); 4275 return; 4276 } 4277 ccb->ccb_h.path->periph = NULL; 4278 ccb->ccb_h.func_code = XPT_ASYNC; 4279 ccb->ccb_h.cbfcnp = xpt_async_process; 4280 ccb->ccb_h.flags |= CAM_UNLOCKED; 4281 ccb->casync.async_code = async_code; 4282 ccb->casync.async_arg_size = 0; 4283 size = xpt_async_size(async_code); 4284 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4285 ("xpt_async: func %#x %s async_code %d %s\n", 4286 ccb->ccb_h.func_code, 4287 xpt_action_name(ccb->ccb_h.func_code), 4288 async_code, 4289 xpt_async_string(async_code))); 4290 if (size > 0 && async_arg != NULL) { 4291 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4292 if (ccb->casync.async_arg_ptr == NULL) { 4293 xpt_print(path, "Can't allocate argument to send %s\n", 4294 xpt_async_string(async_code)); 4295 xpt_free_path(ccb->ccb_h.path); 4296 xpt_free_ccb(ccb); 4297 return; 4298 } 4299 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4300 ccb->casync.async_arg_size = size; 4301 } else if (size < 0) { 4302 ccb->casync.async_arg_ptr = async_arg; 4303 ccb->casync.async_arg_size = size; 4304 } 4305 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4306 xpt_freeze_devq(path, 1); 4307 else 4308 xpt_freeze_simq(path->bus->sim, 1); 4309 xpt_done(ccb); 4310 } 4311 4312 static void 4313 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4314 struct cam_et *target, struct cam_ed *device, 4315 void *async_arg) 4316 { 4317 4318 /* 4319 * We only need to handle events for real devices. 4320 */ 4321 if (target->target_id == CAM_TARGET_WILDCARD 4322 || device->lun_id == CAM_LUN_WILDCARD) 4323 return; 4324 4325 printf("%s called\n", __func__); 4326 } 4327 4328 static uint32_t 4329 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4330 { 4331 struct cam_devq *devq; 4332 uint32_t freeze; 4333 4334 devq = dev->sim->devq; 4335 mtx_assert(&devq->send_mtx, MA_OWNED); 4336 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4337 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4338 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4339 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4340 /* Remove frozen device from sendq.
*/ 4341 if (device_is_queued(dev)) 4342 camq_remove(&devq->send_queue, dev->devq_entry.index); 4343 return (freeze); 4344 } 4345 4346 u_int32_t 4347 xpt_freeze_devq(struct cam_path *path, u_int count) 4348 { 4349 struct cam_ed *dev = path->device; 4350 struct cam_devq *devq; 4351 uint32_t freeze; 4352 4353 devq = dev->sim->devq; 4354 mtx_lock(&devq->send_mtx); 4355 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4356 freeze = xpt_freeze_devq_device(dev, count); 4357 mtx_unlock(&devq->send_mtx); 4358 return (freeze); 4359 } 4360 4361 u_int32_t 4362 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4363 { 4364 struct cam_devq *devq; 4365 uint32_t freeze; 4366 4367 devq = sim->devq; 4368 mtx_lock(&devq->send_mtx); 4369 freeze = (devq->send_queue.qfrozen_cnt += count); 4370 mtx_unlock(&devq->send_mtx); 4371 return (freeze); 4372 } 4373 4374 static void 4375 xpt_release_devq_timeout(void *arg) 4376 { 4377 struct cam_ed *dev; 4378 struct cam_devq *devq; 4379 4380 dev = (struct cam_ed *)arg; 4381 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4382 devq = dev->sim->devq; 4383 mtx_assert(&devq->send_mtx, MA_OWNED); 4384 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4385 xpt_run_devq(devq); 4386 } 4387 4388 void 4389 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4390 { 4391 struct cam_ed *dev; 4392 struct cam_devq *devq; 4393 4394 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4395 count, run_queue)); 4396 dev = path->device; 4397 devq = dev->sim->devq; 4398 mtx_lock(&devq->send_mtx); 4399 if (xpt_release_devq_device(dev, count, run_queue)) 4400 xpt_run_devq(dev->sim->devq); 4401 mtx_unlock(&devq->send_mtx); 4402 } 4403 4404 static int 4405 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4406 { 4407 4408 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4409 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4410 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4411 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4412 if (count > dev->ccbq.queue.qfrozen_cnt) { 4413 #ifdef INVARIANTS 4414 printf("xpt_release_devq(): requested %u > present %u\n", 4415 count, dev->ccbq.queue.qfrozen_cnt); 4416 #endif 4417 count = dev->ccbq.queue.qfrozen_cnt; 4418 } 4419 dev->ccbq.queue.qfrozen_cnt -= count; 4420 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4421 /* 4422 * No longer need to wait for a successful 4423 * command completion. 4424 */ 4425 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4426 /* 4427 * Remove any timeouts that might be scheduled 4428 * to release this queue. 4429 */ 4430 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4431 callout_stop(&dev->callout); 4432 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4433 } 4434 /* 4435 * Now that we are unfrozen schedule the 4436 * device so any pending transactions are 4437 * run. 4438 */ 4439 xpt_schedule_devq(dev->sim->devq, dev); 4440 } else 4441 run_queue = 0; 4442 return (run_queue); 4443 } 4444 4445 void 4446 xpt_release_simq(struct cam_sim *sim, int run_queue) 4447 { 4448 struct cam_devq *devq; 4449 4450 devq = sim->devq; 4451 mtx_lock(&devq->send_mtx); 4452 if (devq->send_queue.qfrozen_cnt <= 0) { 4453 #ifdef INVARIANTS 4454 printf("xpt_release_simq: requested 1 > present %u\n", 4455 devq->send_queue.qfrozen_cnt); 4456 #endif 4457 } else 4458 devq->send_queue.qfrozen_cnt--; 4459 if (devq->send_queue.qfrozen_cnt == 0) { 4460 /* 4461 * If there is a timeout scheduled to release this 4462 * sim queue, remove it. 
The queue frozen count is 4463 * already at 0. 4464 */ 4465 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4466 callout_stop(&sim->callout); 4467 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4468 } 4469 if (run_queue) { 4470 /* 4471 * Now that we are unfrozen run the send queue. 4472 */ 4473 xpt_run_devq(sim->devq); 4474 } 4475 } 4476 mtx_unlock(&devq->send_mtx); 4477 } 4478 4479 /* 4480 * XXX Appears to be unused. 4481 */ 4482 static void 4483 xpt_release_simq_timeout(void *arg) 4484 { 4485 struct cam_sim *sim; 4486 4487 sim = (struct cam_sim *)arg; 4488 xpt_release_simq(sim, /* run_queue */ TRUE); 4489 } 4490 4491 void 4492 xpt_done(union ccb *done_ccb) 4493 { 4494 struct cam_doneq *queue; 4495 int run, hash; 4496 4497 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4498 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4499 done_ccb->csio.bio != NULL) 4500 biotrack(done_ccb->csio.bio, __func__); 4501 #endif 4502 4503 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4504 ("xpt_done: func= %#x %s status %#x\n", 4505 done_ccb->ccb_h.func_code, 4506 xpt_action_name(done_ccb->ccb_h.func_code), 4507 done_ccb->ccb_h.status)); 4508 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4509 return; 4510 4511 /* Store the time the ccb was in the sim */ 4512 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4513 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4514 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4515 queue = &cam_doneqs[hash]; 4516 mtx_lock(&queue->cam_doneq_mtx); 4517 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4518 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4519 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4520 mtx_unlock(&queue->cam_doneq_mtx); 4521 if (run) 4522 wakeup(&queue->cam_doneq); 4523 } 4524 4525 void 4526 xpt_done_direct(union ccb *done_ccb) 4527 { 4528 4529 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4530 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4531 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4532 return; 4533 4534 /* Store the time the ccb was in the sim */ 4535 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4536 xpt_done_process(&done_ccb->ccb_h); 4537 } 4538 4539 union ccb * 4540 xpt_alloc_ccb() 4541 { 4542 union ccb *new_ccb; 4543 4544 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4545 return (new_ccb); 4546 } 4547 4548 union ccb * 4549 xpt_alloc_ccb_nowait() 4550 { 4551 union ccb *new_ccb; 4552 4553 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4554 return (new_ccb); 4555 } 4556 4557 void 4558 xpt_free_ccb(union ccb *free_ccb) 4559 { 4560 free(free_ccb, M_CAMCCB); 4561 } 4562 4563 4564 4565 /* Private XPT functions */ 4566 4567 /* 4568 * Get a CAM control block for the caller. Charge the structure to the device 4569 * referenced by the path. If we don't have sufficient resources to allocate 4570 * more ccbs, we return NULL. 
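 * Peripheral drivers normally do not call this allocator directly; a
 * hedged sketch of the usual consumer pattern, assuming periph is a
 * held and locked peripheral, is:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	... fill in and submit the CCB, then on completion ...
 *	xpt_release_ccb(ccb);
 *
 * cam_periph_getccb() below takes care of sleeping until a CCB of the
 * requested priority can be delivered.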
4571 */ 4572 static union ccb * 4573 xpt_get_ccb_nowait(struct cam_periph *periph) 4574 { 4575 union ccb *new_ccb; 4576 4577 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4578 if (new_ccb == NULL) 4579 return (NULL); 4580 periph->periph_allocated++; 4581 cam_ccbq_take_opening(&periph->path->device->ccbq); 4582 return (new_ccb); 4583 } 4584 4585 static union ccb * 4586 xpt_get_ccb(struct cam_periph *periph) 4587 { 4588 union ccb *new_ccb; 4589 4590 cam_periph_unlock(periph); 4591 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4592 cam_periph_lock(periph); 4593 periph->periph_allocated++; 4594 cam_ccbq_take_opening(&periph->path->device->ccbq); 4595 return (new_ccb); 4596 } 4597 4598 union ccb * 4599 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4600 { 4601 struct ccb_hdr *ccb_h; 4602 4603 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4604 cam_periph_assert(periph, MA_OWNED); 4605 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4606 ccb_h->pinfo.priority != priority) { 4607 if (priority < periph->immediate_priority) { 4608 periph->immediate_priority = priority; 4609 xpt_run_allocq(periph, 0); 4610 } else 4611 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4612 "cgticb", 0); 4613 } 4614 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4615 return ((union ccb *)ccb_h); 4616 } 4617 4618 static void 4619 xpt_acquire_bus(struct cam_eb *bus) 4620 { 4621 4622 xpt_lock_buses(); 4623 bus->refcount++; 4624 xpt_unlock_buses(); 4625 } 4626 4627 static void 4628 xpt_release_bus(struct cam_eb *bus) 4629 { 4630 4631 xpt_lock_buses(); 4632 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4633 if (--bus->refcount > 0) { 4634 xpt_unlock_buses(); 4635 return; 4636 } 4637 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4638 xsoftc.bus_generation++; 4639 xpt_unlock_buses(); 4640 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4641 ("destroying bus, but target list is not empty")); 4642 cam_sim_release(bus->sim); 4643 mtx_destroy(&bus->eb_mtx); 4644 free(bus, M_CAMXPT); 4645 } 4646 4647 static struct cam_et * 4648 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4649 { 4650 struct cam_et *cur_target, *target; 4651 4652 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4653 mtx_assert(&bus->eb_mtx, MA_OWNED); 4654 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4655 M_NOWAIT|M_ZERO); 4656 if (target == NULL) 4657 return (NULL); 4658 4659 TAILQ_INIT(&target->ed_entries); 4660 target->bus = bus; 4661 target->target_id = target_id; 4662 target->refcount = 1; 4663 target->generation = 0; 4664 target->luns = NULL; 4665 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4666 timevalclear(&target->last_reset); 4667 /* 4668 * Hold a reference to our parent bus so it 4669 * will not go away before we do. 
4670 */ 4671 bus->refcount++; 4672 4673 /* Insertion sort into our bus's target list */ 4674 cur_target = TAILQ_FIRST(&bus->et_entries); 4675 while (cur_target != NULL && cur_target->target_id < target_id) 4676 cur_target = TAILQ_NEXT(cur_target, links); 4677 if (cur_target != NULL) { 4678 TAILQ_INSERT_BEFORE(cur_target, target, links); 4679 } else { 4680 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4681 } 4682 bus->generation++; 4683 return (target); 4684 } 4685 4686 static void 4687 xpt_acquire_target(struct cam_et *target) 4688 { 4689 struct cam_eb *bus = target->bus; 4690 4691 mtx_lock(&bus->eb_mtx); 4692 target->refcount++; 4693 mtx_unlock(&bus->eb_mtx); 4694 } 4695 4696 static void 4697 xpt_release_target(struct cam_et *target) 4698 { 4699 struct cam_eb *bus = target->bus; 4700 4701 mtx_lock(&bus->eb_mtx); 4702 if (--target->refcount > 0) { 4703 mtx_unlock(&bus->eb_mtx); 4704 return; 4705 } 4706 TAILQ_REMOVE(&bus->et_entries, target, links); 4707 bus->generation++; 4708 mtx_unlock(&bus->eb_mtx); 4709 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4710 ("destroying target, but device list is not empty")); 4711 xpt_release_bus(bus); 4712 mtx_destroy(&target->luns_mtx); 4713 if (target->luns) 4714 free(target->luns, M_CAMXPT); 4715 free(target, M_CAMXPT); 4716 } 4717 4718 static struct cam_ed * 4719 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4720 lun_id_t lun_id) 4721 { 4722 struct cam_ed *device; 4723 4724 device = xpt_alloc_device(bus, target, lun_id); 4725 if (device == NULL) 4726 return (NULL); 4727 4728 device->mintags = 1; 4729 device->maxtags = 1; 4730 return (device); 4731 } 4732 4733 static void 4734 xpt_destroy_device(void *context, int pending) 4735 { 4736 struct cam_ed *device = context; 4737 4738 mtx_lock(&device->device_mtx); 4739 mtx_destroy(&device->device_mtx); 4740 free(device, M_CAMDEV); 4741 } 4742 4743 struct cam_ed * 4744 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4745 { 4746 struct cam_ed *cur_device, *device; 4747 struct cam_devq *devq; 4748 cam_status status; 4749 4750 mtx_assert(&bus->eb_mtx, MA_OWNED); 4751 /* Make space for us in the device queue on our bus */ 4752 devq = bus->sim->devq; 4753 mtx_lock(&devq->send_mtx); 4754 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4755 mtx_unlock(&devq->send_mtx); 4756 if (status != CAM_REQ_CMP) 4757 return (NULL); 4758 4759 device = (struct cam_ed *)malloc(sizeof(*device), 4760 M_CAMDEV, M_NOWAIT|M_ZERO); 4761 if (device == NULL) 4762 return (NULL); 4763 4764 cam_init_pinfo(&device->devq_entry); 4765 device->target = target; 4766 device->lun_id = lun_id; 4767 device->sim = bus->sim; 4768 if (cam_ccbq_init(&device->ccbq, 4769 bus->sim->max_dev_openings) != 0) { 4770 free(device, M_CAMDEV); 4771 return (NULL); 4772 } 4773 SLIST_INIT(&device->asyncs); 4774 SLIST_INIT(&device->periphs); 4775 device->generation = 0; 4776 device->flags = CAM_DEV_UNCONFIGURED; 4777 device->tag_delay_count = 0; 4778 device->tag_saved_openings = 0; 4779 device->refcount = 1; 4780 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4781 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4782 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4783 /* 4784 * Hold a reference to our parent bus so it 4785 * will not go away before we do. 
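 * (More precisely, the reference taken below is on the parent target;
 * the target in turn holds a reference on its bus, acquired in
 * xpt_alloc_target() above.)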
4786 */ 4787 target->refcount++; 4788 4789 cur_device = TAILQ_FIRST(&target->ed_entries); 4790 while (cur_device != NULL && cur_device->lun_id < lun_id) 4791 cur_device = TAILQ_NEXT(cur_device, links); 4792 if (cur_device != NULL) 4793 TAILQ_INSERT_BEFORE(cur_device, device, links); 4794 else 4795 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4796 target->generation++; 4797 return (device); 4798 } 4799 4800 void 4801 xpt_acquire_device(struct cam_ed *device) 4802 { 4803 struct cam_eb *bus = device->target->bus; 4804 4805 mtx_lock(&bus->eb_mtx); 4806 device->refcount++; 4807 mtx_unlock(&bus->eb_mtx); 4808 } 4809 4810 void 4811 xpt_release_device(struct cam_ed *device) 4812 { 4813 struct cam_eb *bus = device->target->bus; 4814 struct cam_devq *devq; 4815 4816 mtx_lock(&bus->eb_mtx); 4817 if (--device->refcount > 0) { 4818 mtx_unlock(&bus->eb_mtx); 4819 return; 4820 } 4821 4822 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4823 device->target->generation++; 4824 mtx_unlock(&bus->eb_mtx); 4825 4826 /* Release our slot in the devq */ 4827 devq = bus->sim->devq; 4828 mtx_lock(&devq->send_mtx); 4829 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4830 mtx_unlock(&devq->send_mtx); 4831 4832 KASSERT(SLIST_EMPTY(&device->periphs), 4833 ("destroying device, but periphs list is not empty")); 4834 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4835 ("destroying device while still queued for ccbs")); 4836 4837 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4838 callout_stop(&device->callout); 4839 4840 xpt_release_target(device->target); 4841 4842 cam_ccbq_fini(&device->ccbq); 4843 /* 4844 * Free allocated memory. free(9) does nothing if the 4845 * supplied pointer is NULL, so it is safe to call without 4846 * checking. 
4847 */ 4848 free(device->supported_vpds, M_CAMXPT); 4849 free(device->device_id, M_CAMXPT); 4850 free(device->ext_inq, M_CAMXPT); 4851 free(device->physpath, M_CAMXPT); 4852 free(device->rcap_buf, M_CAMXPT); 4853 free(device->serial_num, M_CAMXPT); 4854 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4855 } 4856 4857 u_int32_t 4858 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4859 { 4860 int result; 4861 struct cam_ed *dev; 4862 4863 dev = path->device; 4864 mtx_lock(&dev->sim->devq->send_mtx); 4865 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4866 mtx_unlock(&dev->sim->devq->send_mtx); 4867 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4868 || (dev->inq_flags & SID_CmdQue) != 0) 4869 dev->tag_saved_openings = newopenings; 4870 return (result); 4871 } 4872 4873 static struct cam_eb * 4874 xpt_find_bus(path_id_t path_id) 4875 { 4876 struct cam_eb *bus; 4877 4878 xpt_lock_buses(); 4879 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4880 bus != NULL; 4881 bus = TAILQ_NEXT(bus, links)) { 4882 if (bus->path_id == path_id) { 4883 bus->refcount++; 4884 break; 4885 } 4886 } 4887 xpt_unlock_buses(); 4888 return (bus); 4889 } 4890 4891 static struct cam_et * 4892 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4893 { 4894 struct cam_et *target; 4895 4896 mtx_assert(&bus->eb_mtx, MA_OWNED); 4897 for (target = TAILQ_FIRST(&bus->et_entries); 4898 target != NULL; 4899 target = TAILQ_NEXT(target, links)) { 4900 if (target->target_id == target_id) { 4901 target->refcount++; 4902 break; 4903 } 4904 } 4905 return (target); 4906 } 4907 4908 static struct cam_ed * 4909 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4910 { 4911 struct cam_ed *device; 4912 4913 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4914 for (device = TAILQ_FIRST(&target->ed_entries); 4915 device != NULL; 4916 device = TAILQ_NEXT(device, links)) { 4917 if (device->lun_id == lun_id) { 4918 device->refcount++; 4919 break; 4920 } 4921 } 4922 return (device); 4923 } 4924 4925 void 4926 xpt_start_tags(struct cam_path *path) 4927 { 4928 struct ccb_relsim crs; 4929 struct cam_ed *device; 4930 struct cam_sim *sim; 4931 int newopenings; 4932 4933 device = path->device; 4934 sim = path->bus->sim; 4935 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4936 xpt_freeze_devq(path, /*count*/1); 4937 device->inq_flags |= SID_CmdQue; 4938 if (device->tag_saved_openings != 0) 4939 newopenings = device->tag_saved_openings; 4940 else 4941 newopenings = min(device->maxtags, 4942 sim->max_tagged_dev_openings); 4943 xpt_dev_ccbq_resize(path, newopenings); 4944 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4945 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4946 crs.ccb_h.func_code = XPT_REL_SIMQ; 4947 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4948 crs.openings 4949 = crs.release_timeout 4950 = crs.qfrozen_cnt 4951 = 0; 4952 xpt_action((union ccb *)&crs); 4953 } 4954 4955 void 4956 xpt_stop_tags(struct cam_path *path) 4957 { 4958 struct ccb_relsim crs; 4959 struct cam_ed *device; 4960 struct cam_sim *sim; 4961 4962 device = path->device; 4963 sim = path->bus->sim; 4964 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4965 device->tag_delay_count = 0; 4966 xpt_freeze_devq(path, /*count*/1); 4967 device->inq_flags &= ~SID_CmdQue; 4968 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4969 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4970 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4971 crs.ccb_h.func_code = XPT_REL_SIMQ; 4972 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4973 crs.openings 4974 = 
crs.release_timeout 4975 = crs.qfrozen_cnt 4976 = 0; 4977 xpt_action((union ccb *)&crs); 4978 } 4979 4980 static void 4981 xpt_boot_delay(void *arg) 4982 { 4983 4984 xpt_release_boot(); 4985 } 4986 4987 static void 4988 xpt_config(void *arg) 4989 { 4990 /* 4991 * Now that interrupts are enabled, go find our devices 4992 */ 4993 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 4994 printf("xpt_config: failed to create taskqueue thread.\n"); 4995 4996 /* Setup debugging path */ 4997 if (cam_dflags != CAM_DEBUG_NONE) { 4998 if (xpt_create_path(&cam_dpath, NULL, 4999 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5000 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5001 printf("xpt_config: xpt_create_path() failed for debug" 5002 " target %d:%d:%d, debugging disabled\n", 5003 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5004 cam_dflags = CAM_DEBUG_NONE; 5005 } 5006 } else 5007 cam_dpath = NULL; 5008 5009 periphdriver_init(1); 5010 xpt_hold_boot(); 5011 callout_init(&xsoftc.boot_callout, 1); 5012 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 5013 xpt_boot_delay, NULL, 0); 5014 /* Fire up rescan thread. */ 5015 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5016 "cam", "scanner")) { 5017 printf("xpt_config: failed to create rescan thread.\n"); 5018 } 5019 } 5020 5021 void 5022 xpt_hold_boot(void) 5023 { 5024 xpt_lock_buses(); 5025 xsoftc.buses_to_config++; 5026 xpt_unlock_buses(); 5027 } 5028 5029 void 5030 xpt_release_boot(void) 5031 { 5032 xpt_lock_buses(); 5033 xsoftc.buses_to_config--; 5034 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 5035 struct xpt_task *task; 5036 5037 xsoftc.buses_config_done = 1; 5038 xpt_unlock_buses(); 5039 /* Call manually because we don't have any buses */ 5040 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5041 if (task != NULL) { 5042 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5043 taskqueue_enqueue(taskqueue_thread, &task->task); 5044 } 5045 } else 5046 xpt_unlock_buses(); 5047 } 5048 5049 /* 5050 * If the given device only has one peripheral attached to it, and if that 5051 * peripheral is the passthrough driver, announce it. This insures that the 5052 * user sees some sort of announcement for every peripheral in their system. 5053 */ 5054 static int 5055 xptpassannouncefunc(struct cam_ed *device, void *arg) 5056 { 5057 struct cam_periph *periph; 5058 int i; 5059 5060 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5061 periph = SLIST_NEXT(periph, periph_links), i++); 5062 5063 periph = SLIST_FIRST(&device->periphs); 5064 if ((i == 1) 5065 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5066 xpt_announce_periph(periph, NULL); 5067 5068 return(1); 5069 } 5070 5071 static void 5072 xpt_finishconfig_task(void *context, int pending) 5073 { 5074 5075 periphdriver_init(2); 5076 /* 5077 * Check for devices with no "standard" peripheral driver 5078 * attached. For any devices like that, announce the 5079 * passthrough driver so the user will see something. 5080 */ 5081 if (!bootverbose) 5082 xpt_for_all_devices(xptpassannouncefunc, NULL); 5083 5084 /* Release our hook so that the boot can continue. 
*/ 5085 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5086 free(xsoftc.xpt_config_hook, M_CAMXPT); 5087 xsoftc.xpt_config_hook = NULL; 5088 5089 free(context, M_CAMXPT); 5090 } 5091 5092 cam_status 5093 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5094 struct cam_path *path) 5095 { 5096 struct ccb_setasync csa; 5097 cam_status status; 5098 int xptpath = 0; 5099 5100 if (path == NULL) { 5101 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5102 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5103 if (status != CAM_REQ_CMP) 5104 return (status); 5105 xpt_path_lock(path); 5106 xptpath = 1; 5107 } 5108 5109 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5110 csa.ccb_h.func_code = XPT_SASYNC_CB; 5111 csa.event_enable = event; 5112 csa.callback = cbfunc; 5113 csa.callback_arg = cbarg; 5114 xpt_action((union ccb *)&csa); 5115 status = csa.ccb_h.status; 5116 5117 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5118 ("xpt_register_async: func %p\n", cbfunc)); 5119 5120 if (xptpath) { 5121 xpt_path_unlock(path); 5122 xpt_free_path(path); 5123 } 5124 5125 if ((status == CAM_REQ_CMP) && 5126 (csa.event_enable & AC_FOUND_DEVICE)) { 5127 /* 5128 * Get this peripheral up to date with all 5129 * the currently existing devices. 5130 */ 5131 xpt_for_all_devices(xptsetasyncfunc, &csa); 5132 } 5133 if ((status == CAM_REQ_CMP) && 5134 (csa.event_enable & AC_PATH_REGISTERED)) { 5135 /* 5136 * Get this peripheral up to date with all 5137 * the currently existing buses. 5138 */ 5139 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5140 } 5141 5142 return (status); 5143 } 5144 5145 static void 5146 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5147 { 5148 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5149 5150 switch (work_ccb->ccb_h.func_code) { 5151 /* Common cases first */ 5152 case XPT_PATH_INQ: /* Path routing inquiry */ 5153 { 5154 struct ccb_pathinq *cpi; 5155 5156 cpi = &work_ccb->cpi; 5157 cpi->version_num = 1; /* XXX??? */ 5158 cpi->hba_inquiry = 0; 5159 cpi->target_sprt = 0; 5160 cpi->hba_misc = 0; 5161 cpi->hba_eng_cnt = 0; 5162 cpi->max_target = 0; 5163 cpi->max_lun = 0; 5164 cpi->initiator_id = 0; 5165 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5166 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5167 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5168 cpi->unit_number = sim->unit_number; 5169 cpi->bus_id = sim->bus_id; 5170 cpi->base_transfer_speed = 0; 5171 cpi->protocol = PROTO_UNSPECIFIED; 5172 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5173 cpi->transport = XPORT_UNSPECIFIED; 5174 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5175 cpi->ccb_h.status = CAM_REQ_CMP; 5176 xpt_done(work_ccb); 5177 break; 5178 } 5179 default: 5180 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5181 xpt_done(work_ccb); 5182 break; 5183 } 5184 } 5185 5186 /* 5187 * The xpt as a "controller" has no interrupt sources, so polling 5188 * is a no-op. 
5189 */ 5190 static void 5191 xptpoll(struct cam_sim *sim) 5192 { 5193 } 5194 5195 void 5196 xpt_lock_buses(void) 5197 { 5198 mtx_lock(&xsoftc.xpt_topo_lock); 5199 } 5200 5201 void 5202 xpt_unlock_buses(void) 5203 { 5204 mtx_unlock(&xsoftc.xpt_topo_lock); 5205 } 5206 5207 struct mtx * 5208 xpt_path_mtx(struct cam_path *path) 5209 { 5210 5211 return (&path->device->device_mtx); 5212 } 5213 5214 static void 5215 xpt_done_process(struct ccb_hdr *ccb_h) 5216 { 5217 struct cam_sim *sim; 5218 struct cam_devq *devq; 5219 struct mtx *mtx = NULL; 5220 5221 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5222 struct ccb_scsiio *csio; 5223 5224 if (ccb_h->func_code == XPT_SCSI_IO) { 5225 csio = &((union ccb *)ccb_h)->csio; 5226 if (csio->bio != NULL) 5227 biotrack(csio->bio, __func__); 5228 } 5229 #endif 5230 5231 if (ccb_h->flags & CAM_HIGH_POWER) { 5232 struct highpowerlist *hphead; 5233 struct cam_ed *device; 5234 5235 mtx_lock(&xsoftc.xpt_highpower_lock); 5236 hphead = &xsoftc.highpowerq; 5237 5238 device = STAILQ_FIRST(hphead); 5239 5240 /* 5241 * Increment the count since this command is done. 5242 */ 5243 xsoftc.num_highpower++; 5244 5245 /* 5246 * Any high powered commands queued up? 5247 */ 5248 if (device != NULL) { 5249 5250 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5251 mtx_unlock(&xsoftc.xpt_highpower_lock); 5252 5253 mtx_lock(&device->sim->devq->send_mtx); 5254 xpt_release_devq_device(device, 5255 /*count*/1, /*runqueue*/TRUE); 5256 mtx_unlock(&device->sim->devq->send_mtx); 5257 } else 5258 mtx_unlock(&xsoftc.xpt_highpower_lock); 5259 } 5260 5261 sim = ccb_h->path->bus->sim; 5262 5263 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5264 xpt_release_simq(sim, /*run_queue*/FALSE); 5265 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5266 } 5267 5268 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5269 && (ccb_h->status & CAM_DEV_QFRZN)) { 5270 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5271 ccb_h->status &= ~CAM_DEV_QFRZN; 5272 } 5273 5274 devq = sim->devq; 5275 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5276 struct cam_ed *dev = ccb_h->path->device; 5277 5278 mtx_lock(&devq->send_mtx); 5279 devq->send_active--; 5280 devq->send_openings++; 5281 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5282 5283 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5284 && (dev->ccbq.dev_active == 0))) { 5285 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5286 xpt_release_devq_device(dev, /*count*/1, 5287 /*run_queue*/FALSE); 5288 } 5289 5290 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5291 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5292 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5293 xpt_release_devq_device(dev, /*count*/1, 5294 /*run_queue*/FALSE); 5295 } 5296 5297 if (!device_is_queued(dev)) 5298 (void)xpt_schedule_devq(devq, dev); 5299 xpt_run_devq(devq); 5300 mtx_unlock(&devq->send_mtx); 5301 5302 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5303 mtx = xpt_path_mtx(ccb_h->path); 5304 mtx_lock(mtx); 5305 5306 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5307 && (--dev->tag_delay_count == 0)) 5308 xpt_start_tags(ccb_h->path); 5309 } 5310 } 5311 5312 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5313 if (mtx == NULL) { 5314 mtx = xpt_path_mtx(ccb_h->path); 5315 mtx_lock(mtx); 5316 } 5317 } else { 5318 if (mtx != NULL) { 5319 mtx_unlock(mtx); 5320 mtx = NULL; 5321 } 5322 } 5323 5324 /* Call the peripheral driver's callback */ 5325 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5326 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5327 if (mtx != NULL) 5328 
mtx_unlock(mtx); 5329 } 5330 5331 void 5332 xpt_done_td(void *arg) 5333 { 5334 struct cam_doneq *queue = arg; 5335 struct ccb_hdr *ccb_h; 5336 STAILQ_HEAD(, ccb_hdr) doneq; 5337 5338 STAILQ_INIT(&doneq); 5339 mtx_lock(&queue->cam_doneq_mtx); 5340 while (1) { 5341 while (STAILQ_EMPTY(&queue->cam_doneq)) { 5342 queue->cam_doneq_sleep = 1; 5343 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5344 PRIBIO, "-", 0); 5345 queue->cam_doneq_sleep = 0; 5346 } 5347 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5348 mtx_unlock(&queue->cam_doneq_mtx); 5349 5350 THREAD_NO_SLEEPING(); 5351 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5352 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5353 xpt_done_process(ccb_h); 5354 } 5355 THREAD_SLEEPING_OK(); 5356 5357 mtx_lock(&queue->cam_doneq_mtx); 5358 } 5359 } 5360 5361 static void 5362 camisr_runqueue(void) 5363 { 5364 struct ccb_hdr *ccb_h; 5365 struct cam_doneq *queue; 5366 int i; 5367 5368 /* Process global queues. */ 5369 for (i = 0; i < cam_num_doneqs; i++) { 5370 queue = &cam_doneqs[i]; 5371 mtx_lock(&queue->cam_doneq_mtx); 5372 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5373 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5374 mtx_unlock(&queue->cam_doneq_mtx); 5375 xpt_done_process(ccb_h); 5376 mtx_lock(&queue->cam_doneq_mtx); 5377 } 5378 mtx_unlock(&queue->cam_doneq_mtx); 5379 } 5380 } 5381 5382 struct kv 5383 { 5384 uint32_t v; 5385 const char *name; 5386 }; 5387 5388 static struct kv map[] = { 5389 { XPT_NOOP, "XPT_NOOP" }, 5390 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5391 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5392 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5393 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5394 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5395 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5396 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5397 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5398 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5399 { XPT_DEBUG, "XPT_DEBUG" }, 5400 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5401 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5402 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5403 { XPT_ASYNC, "XPT_ASYNC" }, 5404 { XPT_ABORT, "XPT_ABORT" }, 5405 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5406 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5407 { XPT_TERM_IO, "XPT_TERM_IO" }, 5408 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5409 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5410 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5411 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5412 { XPT_ATA_IO, "XPT_ATA_IO" }, 5413 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5414 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5415 { XPT_NVME_IO, "XPT_NVME_IO" }, 5416 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5417 { XPT_SMP_IO, "XPT_SMP_IO" }, 5418 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5419 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5420 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5421 { XPT_EN_LUN, "XPT_EN_LUN" }, 5422 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5423 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5424 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5425 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5426 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5427 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5428 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5429 { 0, 0 } 5430 }; 5431 5432 static const char * 5433 xpt_action_name(uint32_t action) 5434 { 5435 static char buffer[32]; /* Only for unknown messages -- racy */ 5436 struct kv *walker = map; 5437 5438 while (walker->name != NULL) { 5439 if (walker->v == action) 5440 return (walker->name); 5441 walker++; 5442 } 5443 5444 snprintf(buffer, sizeof(buffer), "%#x", action); 5445 return 
(buffer); 5446 } 5447