1 /*- 2 * Implementation of the Common Access Method Transport (XPT) layer. 3 * 4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/bio.h> 35 #include <sys/bus.h> 36 #include <sys/systm.h> 37 #include <sys/types.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/time.h> 41 #include <sys/conf.h> 42 #include <sys/fcntl.h> 43 #include <sys/interrupt.h> 44 #include <sys/proc.h> 45 #include <sys/sbuf.h> 46 #include <sys/smp.h> 47 #include <sys/taskqueue.h> 48 49 #include <sys/lock.h> 50 #include <sys/mutex.h> 51 #include <sys/sysctl.h> 52 #include <sys/kthread.h> 53 54 #include <cam/cam.h> 55 #include <cam/cam_ccb.h> 56 #include <cam/cam_periph.h> 57 #include <cam/cam_queue.h> 58 #include <cam/cam_sim.h> 59 #include <cam/cam_xpt.h> 60 #include <cam/cam_xpt_sim.h> 61 #include <cam/cam_xpt_periph.h> 62 #include <cam/cam_xpt_internal.h> 63 #include <cam/cam_debug.h> 64 #include <cam/cam_compat.h> 65 66 #include <cam/scsi/scsi_all.h> 67 #include <cam/scsi/scsi_message.h> 68 #include <cam/scsi/scsi_pass.h> 69 70 #include <machine/md_var.h> /* geometry translation */ 71 #include <machine/stdarg.h> /* for xpt_print below */ 72 73 #include "opt_cam.h" 74 75 /* 76 * This is the maximum number of high powered commands (e.g. start unit) 77 * that can be outstanding at a particular time. 
78 */ 79 #ifndef CAM_MAX_HIGHPOWER 80 #define CAM_MAX_HIGHPOWER 4 81 #endif 82 83 /* Datastructures internal to the xpt layer */ 84 MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers"); 85 MALLOC_DEFINE(M_CAMDEV, "CAM DEV", "CAM devices"); 86 MALLOC_DEFINE(M_CAMCCB, "CAM CCB", "CAM CCBs"); 87 MALLOC_DEFINE(M_CAMPATH, "CAM path", "CAM paths"); 88 89 /* Object for defering XPT actions to a taskqueue */ 90 struct xpt_task { 91 struct task task; 92 void *data1; 93 uintptr_t data2; 94 }; 95 96 struct xpt_softc { 97 uint32_t xpt_generation; 98 99 /* number of high powered commands that can go through right now */ 100 struct mtx xpt_highpower_lock; 101 STAILQ_HEAD(highpowerlist, cam_ed) highpowerq; 102 int num_highpower; 103 104 /* queue for handling async rescan requests. */ 105 TAILQ_HEAD(, ccb_hdr) ccb_scanq; 106 int buses_to_config; 107 int buses_config_done; 108 109 /* 110 * Registered buses 111 * 112 * N.B., "busses" is an archaic spelling of "buses". In new code 113 * "buses" is preferred. 114 */ 115 TAILQ_HEAD(,cam_eb) xpt_busses; 116 u_int bus_generation; 117 118 struct intr_config_hook *xpt_config_hook; 119 120 int boot_delay; 121 struct callout boot_callout; 122 123 struct mtx xpt_topo_lock; 124 struct mtx xpt_lock; 125 struct taskqueue *xpt_taskq; 126 }; 127 128 typedef enum { 129 DM_RET_COPY = 0x01, 130 DM_RET_FLAG_MASK = 0x0f, 131 DM_RET_NONE = 0x00, 132 DM_RET_STOP = 0x10, 133 DM_RET_DESCEND = 0x20, 134 DM_RET_ERROR = 0x30, 135 DM_RET_ACTION_MASK = 0xf0 136 } dev_match_ret; 137 138 typedef enum { 139 XPT_DEPTH_BUS, 140 XPT_DEPTH_TARGET, 141 XPT_DEPTH_DEVICE, 142 XPT_DEPTH_PERIPH 143 } xpt_traverse_depth; 144 145 struct xpt_traverse_config { 146 xpt_traverse_depth depth; 147 void *tr_func; 148 void *tr_arg; 149 }; 150 151 typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg); 152 typedef int xpt_targetfunc_t (struct cam_et *target, void *arg); 153 typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg); 154 typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg); 155 typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg); 156 157 /* Transport layer configuration information */ 158 static struct xpt_softc xsoftc; 159 160 MTX_SYSINIT(xpt_topo_init, &xsoftc.xpt_topo_lock, "XPT topology lock", MTX_DEF); 161 162 SYSCTL_INT(_kern_cam, OID_AUTO, boot_delay, CTLFLAG_RDTUN, 163 &xsoftc.boot_delay, 0, "Bus registration wait time"); 164 SYSCTL_UINT(_kern_cam, OID_AUTO, xpt_generation, CTLFLAG_RD, 165 &xsoftc.xpt_generation, 0, "CAM peripheral generation count"); 166 167 struct cam_doneq { 168 struct mtx_padalign cam_doneq_mtx; 169 STAILQ_HEAD(, ccb_hdr) cam_doneq; 170 int cam_doneq_sleep; 171 }; 172 173 static struct cam_doneq cam_doneqs[MAXCPU]; 174 static int cam_num_doneqs; 175 static struct proc *cam_proc; 176 177 SYSCTL_INT(_kern_cam, OID_AUTO, num_doneqs, CTLFLAG_RDTUN, 178 &cam_num_doneqs, 0, "Number of completion queues/threads"); 179 180 struct cam_periph *xpt_periph; 181 182 static periph_init_t xpt_periph_init; 183 184 static struct periph_driver xpt_driver = 185 { 186 xpt_periph_init, "xpt", 187 TAILQ_HEAD_INITIALIZER(xpt_driver.units), /* generation */ 0, 188 CAM_PERIPH_DRV_EARLY 189 }; 190 191 PERIPHDRIVER_DECLARE(xpt, xpt_driver); 192 193 static d_open_t xptopen; 194 static d_close_t xptclose; 195 static d_ioctl_t xptioctl; 196 static d_ioctl_t xptdoioctl; 197 198 static struct cdevsw xpt_cdevsw = { 199 .d_version = D_VERSION, 200 .d_flags = 0, 201 .d_open = xptopen, 202 .d_close = xptclose, 203 .d_ioctl = xptioctl, 204 .d_name = "xpt", 205 
}; 206 207 /* Storage for debugging datastructures */ 208 struct cam_path *cam_dpath; 209 u_int32_t cam_dflags = CAM_DEBUG_FLAGS; 210 SYSCTL_UINT(_kern_cam, OID_AUTO, dflags, CTLFLAG_RWTUN, 211 &cam_dflags, 0, "Enabled debug flags"); 212 u_int32_t cam_debug_delay = CAM_DEBUG_DELAY; 213 SYSCTL_UINT(_kern_cam, OID_AUTO, debug_delay, CTLFLAG_RWTUN, 214 &cam_debug_delay, 0, "Delay in us after each debug message"); 215 216 /* Our boot-time initialization hook */ 217 static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *); 218 219 static moduledata_t cam_moduledata = { 220 "cam", 221 cam_module_event_handler, 222 NULL 223 }; 224 225 static int xpt_init(void *); 226 227 DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 228 MODULE_VERSION(cam, 1); 229 230 231 static void xpt_async_bcast(struct async_list *async_head, 232 u_int32_t async_code, 233 struct cam_path *path, 234 void *async_arg); 235 static path_id_t xptnextfreepathid(void); 236 static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus); 237 static union ccb *xpt_get_ccb(struct cam_periph *periph); 238 static union ccb *xpt_get_ccb_nowait(struct cam_periph *periph); 239 static void xpt_run_allocq(struct cam_periph *periph, int sleep); 240 static void xpt_run_allocq_task(void *context, int pending); 241 static void xpt_run_devq(struct cam_devq *devq); 242 static timeout_t xpt_release_devq_timeout; 243 static void xpt_release_simq_timeout(void *arg) __unused; 244 static void xpt_acquire_bus(struct cam_eb *bus); 245 static void xpt_release_bus(struct cam_eb *bus); 246 static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count); 247 static int xpt_release_devq_device(struct cam_ed *dev, u_int count, 248 int run_queue); 249 static struct cam_et* 250 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id); 251 static void xpt_acquire_target(struct cam_et *target); 252 static void xpt_release_target(struct cam_et *target); 253 static struct cam_eb* 254 xpt_find_bus(path_id_t path_id); 255 static struct cam_et* 256 xpt_find_target(struct cam_eb *bus, target_id_t target_id); 257 static struct cam_ed* 258 xpt_find_device(struct cam_et *target, lun_id_t lun_id); 259 static void xpt_config(void *arg); 260 static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo, 261 u_int32_t new_priority); 262 static xpt_devicefunc_t xptpassannouncefunc; 263 static void xptaction(struct cam_sim *sim, union ccb *work_ccb); 264 static void xptpoll(struct cam_sim *sim); 265 static void camisr_runqueue(void); 266 static void xpt_done_process(struct ccb_hdr *ccb_h); 267 static void xpt_done_td(void *); 268 static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns, 269 u_int num_patterns, struct cam_eb *bus); 270 static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns, 271 u_int num_patterns, 272 struct cam_ed *device); 273 static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns, 274 u_int num_patterns, 275 struct cam_periph *periph); 276 static xpt_busfunc_t xptedtbusfunc; 277 static xpt_targetfunc_t xptedttargetfunc; 278 static xpt_devicefunc_t xptedtdevicefunc; 279 static xpt_periphfunc_t xptedtperiphfunc; 280 static xpt_pdrvfunc_t xptplistpdrvfunc; 281 static xpt_periphfunc_t xptplistperiphfunc; 282 static int xptedtmatch(struct ccb_dev_match *cdm); 283 static int xptperiphlistmatch(struct ccb_dev_match *cdm); 284 static int xptbustraverse(struct cam_eb *start_bus, 285 xpt_busfunc_t *tr_func, void *arg); 286 static int 
xpttargettraverse(struct cam_eb *bus, 287 struct cam_et *start_target, 288 xpt_targetfunc_t *tr_func, void *arg); 289 static int xptdevicetraverse(struct cam_et *target, 290 struct cam_ed *start_device, 291 xpt_devicefunc_t *tr_func, void *arg); 292 static int xptperiphtraverse(struct cam_ed *device, 293 struct cam_periph *start_periph, 294 xpt_periphfunc_t *tr_func, void *arg); 295 static int xptpdrvtraverse(struct periph_driver **start_pdrv, 296 xpt_pdrvfunc_t *tr_func, void *arg); 297 static int xptpdperiphtraverse(struct periph_driver **pdrv, 298 struct cam_periph *start_periph, 299 xpt_periphfunc_t *tr_func, 300 void *arg); 301 static xpt_busfunc_t xptdefbusfunc; 302 static xpt_targetfunc_t xptdeftargetfunc; 303 static xpt_devicefunc_t xptdefdevicefunc; 304 static xpt_periphfunc_t xptdefperiphfunc; 305 static void xpt_finishconfig_task(void *context, int pending); 306 static void xpt_dev_async_default(u_int32_t async_code, 307 struct cam_eb *bus, 308 struct cam_et *target, 309 struct cam_ed *device, 310 void *async_arg); 311 static struct cam_ed * xpt_alloc_device_default(struct cam_eb *bus, 312 struct cam_et *target, 313 lun_id_t lun_id); 314 static xpt_devicefunc_t xptsetasyncfunc; 315 static xpt_busfunc_t xptsetasyncbusfunc; 316 static cam_status xptregister(struct cam_periph *periph, 317 void *arg); 318 static const char * xpt_action_name(uint32_t action); 319 static __inline int device_is_queued(struct cam_ed *device); 320 321 static __inline int 322 xpt_schedule_devq(struct cam_devq *devq, struct cam_ed *dev) 323 { 324 int retval; 325 326 mtx_assert(&devq->send_mtx, MA_OWNED); 327 if ((dev->ccbq.queue.entries > 0) && 328 (dev->ccbq.dev_openings > 0) && 329 (dev->ccbq.queue.qfrozen_cnt == 0)) { 330 /* 331 * The priority of a device waiting for controller 332 * resources is that of the highest priority CCB 333 * enqueued. 334 */ 335 retval = 336 xpt_schedule_dev(&devq->send_queue, 337 &dev->devq_entry, 338 CAMQ_GET_PRIO(&dev->ccbq.queue)); 339 } else { 340 retval = 0; 341 } 342 return (retval); 343 } 344 345 static __inline int 346 device_is_queued(struct cam_ed *device) 347 { 348 return (device->devq_entry.index != CAM_UNQUEUED_INDEX); 349 } 350 351 static void 352 xpt_periph_init() 353 { 354 make_dev(&xpt_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0"); 355 } 356 357 static int 358 xptopen(struct cdev *dev, int flags, int fmt, struct thread *td) 359 { 360 361 /* 362 * Only allow read-write access. 363 */ 364 if (((flags & FWRITE) == 0) || ((flags & FREAD) == 0)) 365 return(EPERM); 366 367 /* 368 * We don't allow nonblocking access. 369 */ 370 if ((flags & O_NONBLOCK) != 0) { 371 printf("%s: can't do nonblocking access\n", devtoname(dev)); 372 return(ENODEV); 373 } 374 375 return(0); 376 } 377 378 static int 379 xptclose(struct cdev *dev, int flag, int fmt, struct thread *td) 380 { 381 382 return(0); 383 } 384 385 /* 386 * Don't automatically grab the xpt softc lock here even though this is going 387 * through the xpt device. The xpt device is really just a back door for 388 * accessing other devices and SIMs, so the right thing to do is to grab 389 * the appropriate SIM lock once the bus/SIM is located. 
390 */ 391 static int 392 xptioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 393 { 394 int error; 395 396 if ((error = xptdoioctl(dev, cmd, addr, flag, td)) == ENOTTY) { 397 error = cam_compat_ioctl(dev, cmd, addr, flag, td, xptdoioctl); 398 } 399 return (error); 400 } 401 402 static int 403 xptdoioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 404 { 405 int error; 406 407 error = 0; 408 409 switch(cmd) { 410 /* 411 * For the transport layer CAMIOCOMMAND ioctl, we really only want 412 * to accept CCB types that don't quite make sense to send through a 413 * passthrough driver. XPT_PATH_INQ is an exception to this, as stated 414 * in the CAM spec. 415 */ 416 case CAMIOCOMMAND: { 417 union ccb *ccb; 418 union ccb *inccb; 419 struct cam_eb *bus; 420 421 inccb = (union ccb *)addr; 422 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 423 if (inccb->ccb_h.func_code == XPT_SCSI_IO) 424 inccb->csio.bio = NULL; 425 #endif 426 427 bus = xpt_find_bus(inccb->ccb_h.path_id); 428 if (bus == NULL) 429 return (EINVAL); 430 431 switch (inccb->ccb_h.func_code) { 432 case XPT_SCAN_BUS: 433 case XPT_RESET_BUS: 434 if (inccb->ccb_h.target_id != CAM_TARGET_WILDCARD || 435 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 436 xpt_release_bus(bus); 437 return (EINVAL); 438 } 439 break; 440 case XPT_SCAN_TGT: 441 if (inccb->ccb_h.target_id == CAM_TARGET_WILDCARD || 442 inccb->ccb_h.target_lun != CAM_LUN_WILDCARD) { 443 xpt_release_bus(bus); 444 return (EINVAL); 445 } 446 break; 447 default: 448 break; 449 } 450 451 switch(inccb->ccb_h.func_code) { 452 case XPT_SCAN_BUS: 453 case XPT_RESET_BUS: 454 case XPT_PATH_INQ: 455 case XPT_ENG_INQ: 456 case XPT_SCAN_LUN: 457 case XPT_SCAN_TGT: 458 459 ccb = xpt_alloc_ccb(); 460 461 /* 462 * Create a path using the bus, target, and lun the 463 * user passed in. 464 */ 465 if (xpt_create_path(&ccb->ccb_h.path, NULL, 466 inccb->ccb_h.path_id, 467 inccb->ccb_h.target_id, 468 inccb->ccb_h.target_lun) != 469 CAM_REQ_CMP){ 470 error = EINVAL; 471 xpt_free_ccb(ccb); 472 break; 473 } 474 /* Ensure all of our fields are correct */ 475 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 476 inccb->ccb_h.pinfo.priority); 477 xpt_merge_ccb(ccb, inccb); 478 xpt_path_lock(ccb->ccb_h.path); 479 cam_periph_runccb(ccb, NULL, 0, 0, NULL); 480 xpt_path_unlock(ccb->ccb_h.path); 481 bcopy(ccb, inccb, sizeof(union ccb)); 482 xpt_free_path(ccb->ccb_h.path); 483 xpt_free_ccb(ccb); 484 break; 485 486 case XPT_DEBUG: { 487 union ccb ccb; 488 489 /* 490 * This is an immediate CCB, so it's okay to 491 * allocate it on the stack. 492 */ 493 494 /* 495 * Create a path using the bus, target, and lun the 496 * user passed in. 497 */ 498 if (xpt_create_path(&ccb.ccb_h.path, NULL, 499 inccb->ccb_h.path_id, 500 inccb->ccb_h.target_id, 501 inccb->ccb_h.target_lun) != 502 CAM_REQ_CMP){ 503 error = EINVAL; 504 break; 505 } 506 /* Ensure all of our fields are correct */ 507 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path, 508 inccb->ccb_h.pinfo.priority); 509 xpt_merge_ccb(&ccb, inccb); 510 xpt_action(&ccb); 511 bcopy(&ccb, inccb, sizeof(union ccb)); 512 xpt_free_path(ccb.ccb_h.path); 513 break; 514 515 } 516 case XPT_DEV_MATCH: { 517 struct cam_periph_map_info mapinfo; 518 struct cam_path *old_path; 519 520 /* 521 * We can't deal with physical addresses for this 522 * type of transaction. 
523 */ 524 if ((inccb->ccb_h.flags & CAM_DATA_MASK) != 525 CAM_DATA_VADDR) { 526 error = EINVAL; 527 break; 528 } 529 530 /* 531 * Save this in case the caller had it set to 532 * something in particular. 533 */ 534 old_path = inccb->ccb_h.path; 535 536 /* 537 * We really don't need a path for the matching 538 * code. The path is needed because of the 539 * debugging statements in xpt_action(). They 540 * assume that the CCB has a valid path. 541 */ 542 inccb->ccb_h.path = xpt_periph->path; 543 544 bzero(&mapinfo, sizeof(mapinfo)); 545 546 /* 547 * Map the pattern and match buffers into kernel 548 * virtual address space. 549 */ 550 error = cam_periph_mapmem(inccb, &mapinfo, MAXPHYS); 551 552 if (error) { 553 inccb->ccb_h.path = old_path; 554 break; 555 } 556 557 /* 558 * This is an immediate CCB, we can send it on directly. 559 */ 560 xpt_action(inccb); 561 562 /* 563 * Map the buffers back into user space. 564 */ 565 cam_periph_unmapmem(inccb, &mapinfo); 566 567 inccb->ccb_h.path = old_path; 568 569 error = 0; 570 break; 571 } 572 default: 573 error = ENOTSUP; 574 break; 575 } 576 xpt_release_bus(bus); 577 break; 578 } 579 /* 580 * This is the getpassthru ioctl. It takes a XPT_GDEVLIST ccb as input, 581 * with the periphal driver name and unit name filled in. The other 582 * fields don't really matter as input. The passthrough driver name 583 * ("pass"), and unit number are passed back in the ccb. The current 584 * device generation number, and the index into the device peripheral 585 * driver list, and the status are also passed back. Note that 586 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb, 587 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is 588 * (or rather should be) impossible for the device peripheral driver 589 * list to change since we look at the whole thing in one pass, and 590 * we do it with lock protection. 591 * 592 */ 593 case CAMGETPASSTHRU: { 594 union ccb *ccb; 595 struct cam_periph *periph; 596 struct periph_driver **p_drv; 597 char *name; 598 u_int unit; 599 int base_periph_found; 600 601 ccb = (union ccb *)addr; 602 unit = ccb->cgdl.unit_number; 603 name = ccb->cgdl.periph_name; 604 base_periph_found = 0; 605 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 606 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 607 ccb->csio.bio = NULL; 608 #endif 609 610 /* 611 * Sanity check -- make sure we don't get a null peripheral 612 * driver name. 613 */ 614 if (*ccb->cgdl.periph_name == '\0') { 615 error = EINVAL; 616 break; 617 } 618 619 /* Keep the list from changing while we traverse it */ 620 xpt_lock_buses(); 621 622 /* first find our driver in the list of drivers */ 623 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) 624 if (strcmp((*p_drv)->driver_name, name) == 0) 625 break; 626 627 if (*p_drv == NULL) { 628 xpt_unlock_buses(); 629 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 630 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 631 *ccb->cgdl.periph_name = '\0'; 632 ccb->cgdl.unit_number = 0; 633 error = ENOENT; 634 break; 635 } 636 637 /* 638 * Run through every peripheral instance of this driver 639 * and check to see whether it matches the unit passed 640 * in by the user. If it does, get out of the loops and 641 * find the passthrough driver associated with that 642 * peripheral driver. 
643 */ 644 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 645 periph = TAILQ_NEXT(periph, unit_links)) { 646 647 if (periph->unit_number == unit) 648 break; 649 } 650 /* 651 * If we found the peripheral driver that the user passed 652 * in, go through all of the peripheral drivers for that 653 * particular device and look for a passthrough driver. 654 */ 655 if (periph != NULL) { 656 struct cam_ed *device; 657 int i; 658 659 base_periph_found = 1; 660 device = periph->path->device; 661 for (i = 0, periph = SLIST_FIRST(&device->periphs); 662 periph != NULL; 663 periph = SLIST_NEXT(periph, periph_links), i++) { 664 /* 665 * Check to see whether we have a 666 * passthrough device or not. 667 */ 668 if (strcmp(periph->periph_name, "pass") == 0) { 669 /* 670 * Fill in the getdevlist fields. 671 */ 672 strcpy(ccb->cgdl.periph_name, 673 periph->periph_name); 674 ccb->cgdl.unit_number = 675 periph->unit_number; 676 if (SLIST_NEXT(periph, periph_links)) 677 ccb->cgdl.status = 678 CAM_GDEVLIST_MORE_DEVS; 679 else 680 ccb->cgdl.status = 681 CAM_GDEVLIST_LAST_DEVICE; 682 ccb->cgdl.generation = 683 device->generation; 684 ccb->cgdl.index = i; 685 /* 686 * Fill in some CCB header fields 687 * that the user may want. 688 */ 689 ccb->ccb_h.path_id = 690 periph->path->bus->path_id; 691 ccb->ccb_h.target_id = 692 periph->path->target->target_id; 693 ccb->ccb_h.target_lun = 694 periph->path->device->lun_id; 695 ccb->ccb_h.status = CAM_REQ_CMP; 696 break; 697 } 698 } 699 } 700 701 /* 702 * If the periph is null here, one of two things has 703 * happened. The first possibility is that we couldn't 704 * find the unit number of the particular peripheral driver 705 * that the user is asking about. e.g. the user asks for 706 * the passthrough driver for "da11". We find the list of 707 * "da" peripherals all right, but there is no unit 11. 708 * The other possibility is that we went through the list 709 * of peripheral drivers attached to the device structure, 710 * but didn't find one with the name "pass". Either way, 711 * we return ENOENT, since we couldn't find something. 712 */ 713 if (periph == NULL) { 714 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 715 ccb->cgdl.status = CAM_GDEVLIST_ERROR; 716 *ccb->cgdl.periph_name = '\0'; 717 ccb->cgdl.unit_number = 0; 718 error = ENOENT; 719 /* 720 * It is unfortunate that this is even necessary, 721 * but there are many, many clueless users out there. 722 * If this is true, the user is looking for the 723 * passthrough driver, but doesn't have one in his 724 * kernel. 
725 */ 726 if (base_periph_found == 1) { 727 printf("xptioctl: pass driver is not in the " 728 "kernel\n"); 729 printf("xptioctl: put \"device pass\" in " 730 "your kernel config file\n"); 731 } 732 } 733 xpt_unlock_buses(); 734 break; 735 } 736 default: 737 error = ENOTTY; 738 break; 739 } 740 741 return(error); 742 } 743 744 static int 745 cam_module_event_handler(module_t mod, int what, void *arg) 746 { 747 int error; 748 749 switch (what) { 750 case MOD_LOAD: 751 if ((error = xpt_init(NULL)) != 0) 752 return (error); 753 break; 754 case MOD_UNLOAD: 755 return EBUSY; 756 default: 757 return EOPNOTSUPP; 758 } 759 760 return 0; 761 } 762 763 static struct xpt_proto * 764 xpt_proto_find(cam_proto proto) 765 { 766 struct xpt_proto **pp; 767 768 SET_FOREACH(pp, cam_xpt_proto_set) { 769 if ((*pp)->proto == proto) 770 return *pp; 771 } 772 773 return NULL; 774 } 775 776 static void 777 xpt_rescan_done(struct cam_periph *periph, union ccb *done_ccb) 778 { 779 780 if (done_ccb->ccb_h.ppriv_ptr1 == NULL) { 781 xpt_free_path(done_ccb->ccb_h.path); 782 xpt_free_ccb(done_ccb); 783 } else { 784 done_ccb->ccb_h.cbfcnp = done_ccb->ccb_h.ppriv_ptr1; 785 (*done_ccb->ccb_h.cbfcnp)(periph, done_ccb); 786 } 787 xpt_release_boot(); 788 } 789 790 /* thread to handle bus rescans */ 791 static void 792 xpt_scanner_thread(void *dummy) 793 { 794 union ccb *ccb; 795 struct cam_path path; 796 797 xpt_lock_buses(); 798 for (;;) { 799 if (TAILQ_EMPTY(&xsoftc.ccb_scanq)) 800 msleep(&xsoftc.ccb_scanq, &xsoftc.xpt_topo_lock, PRIBIO, 801 "-", 0); 802 if ((ccb = (union ccb *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) { 803 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 804 xpt_unlock_buses(); 805 806 /* 807 * Since lock can be dropped inside and path freed 808 * by completion callback even before return here, 809 * take our own path copy for reference. 810 */ 811 xpt_copy_path(&path, ccb->ccb_h.path); 812 xpt_path_lock(&path); 813 xpt_action(ccb); 814 xpt_path_unlock(&path); 815 xpt_release_path(&path); 816 817 xpt_lock_buses(); 818 } 819 } 820 } 821 822 void 823 xpt_rescan(union ccb *ccb) 824 { 825 struct ccb_hdr *hdr; 826 827 /* Prepare request */ 828 if (ccb->ccb_h.path->target->target_id == CAM_TARGET_WILDCARD && 829 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 830 ccb->ccb_h.func_code = XPT_SCAN_BUS; 831 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 832 ccb->ccb_h.path->device->lun_id == CAM_LUN_WILDCARD) 833 ccb->ccb_h.func_code = XPT_SCAN_TGT; 834 else if (ccb->ccb_h.path->target->target_id != CAM_TARGET_WILDCARD && 835 ccb->ccb_h.path->device->lun_id != CAM_LUN_WILDCARD) 836 ccb->ccb_h.func_code = XPT_SCAN_LUN; 837 else { 838 xpt_print(ccb->ccb_h.path, "illegal scan path\n"); 839 xpt_free_path(ccb->ccb_h.path); 840 xpt_free_ccb(ccb); 841 return; 842 } 843 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 844 ("xpt_rescan: func %#x %s\n", ccb->ccb_h.func_code, 845 xpt_action_name(ccb->ccb_h.func_code))); 846 847 ccb->ccb_h.ppriv_ptr1 = ccb->ccb_h.cbfcnp; 848 ccb->ccb_h.cbfcnp = xpt_rescan_done; 849 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, CAM_PRIORITY_XPT); 850 /* Don't make duplicate entries for the same paths. 
*/ 851 xpt_lock_buses(); 852 if (ccb->ccb_h.ppriv_ptr1 == NULL) { 853 TAILQ_FOREACH(hdr, &xsoftc.ccb_scanq, sim_links.tqe) { 854 if (xpt_path_comp(hdr->path, ccb->ccb_h.path) == 0) { 855 wakeup(&xsoftc.ccb_scanq); 856 xpt_unlock_buses(); 857 xpt_print(ccb->ccb_h.path, "rescan already queued\n"); 858 xpt_free_path(ccb->ccb_h.path); 859 xpt_free_ccb(ccb); 860 return; 861 } 862 } 863 } 864 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe); 865 xsoftc.buses_to_config++; 866 wakeup(&xsoftc.ccb_scanq); 867 xpt_unlock_buses(); 868 } 869 870 /* Functions accessed by the peripheral drivers */ 871 static int 872 xpt_init(void *dummy) 873 { 874 struct cam_sim *xpt_sim; 875 struct cam_path *path; 876 struct cam_devq *devq; 877 cam_status status; 878 int error, i; 879 880 TAILQ_INIT(&xsoftc.xpt_busses); 881 TAILQ_INIT(&xsoftc.ccb_scanq); 882 STAILQ_INIT(&xsoftc.highpowerq); 883 xsoftc.num_highpower = CAM_MAX_HIGHPOWER; 884 885 mtx_init(&xsoftc.xpt_lock, "XPT lock", NULL, MTX_DEF); 886 mtx_init(&xsoftc.xpt_highpower_lock, "XPT highpower lock", NULL, MTX_DEF); 887 xsoftc.xpt_taskq = taskqueue_create("CAM XPT task", M_WAITOK, 888 taskqueue_thread_enqueue, /*context*/&xsoftc.xpt_taskq); 889 890 #ifdef CAM_BOOT_DELAY 891 /* 892 * Override this value at compile time to assist our users 893 * who don't use loader to boot a kernel. 894 */ 895 xsoftc.boot_delay = CAM_BOOT_DELAY; 896 #endif 897 /* 898 * The xpt layer is, itself, the equivalent of a SIM. 899 * Allow 16 ccbs in the ccb pool for it. This should 900 * give decent parallelism when we probe buses and 901 * perform other XPT functions. 902 */ 903 devq = cam_simq_alloc(16); 904 xpt_sim = cam_sim_alloc(xptaction, 905 xptpoll, 906 "xpt", 907 /*softc*/NULL, 908 /*unit*/0, 909 /*mtx*/&xsoftc.xpt_lock, 910 /*max_dev_transactions*/0, 911 /*max_tagged_dev_transactions*/0, 912 devq); 913 if (xpt_sim == NULL) 914 return (ENOMEM); 915 916 mtx_lock(&xsoftc.xpt_lock); 917 if ((status = xpt_bus_register(xpt_sim, NULL, 0)) != CAM_SUCCESS) { 918 mtx_unlock(&xsoftc.xpt_lock); 919 printf("xpt_init: xpt_bus_register failed with status %#x," 920 " failing attach\n", status); 921 return (EINVAL); 922 } 923 mtx_unlock(&xsoftc.xpt_lock); 924 925 /* 926 * Looking at the XPT from the SIM layer, the XPT is 927 * the equivalent of a peripheral driver. Allocate 928 * a peripheral driver entry for us. 929 */ 930 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 931 CAM_TARGET_WILDCARD, 932 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) { 933 printf("xpt_init: xpt_create_path failed with status %#x," 934 " failing attach\n", status); 935 return (EINVAL); 936 } 937 xpt_path_lock(path); 938 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO, 939 path, NULL, 0, xpt_sim); 940 xpt_path_unlock(path); 941 xpt_free_path(path); 942 943 if (cam_num_doneqs < 1) 944 cam_num_doneqs = 1 + mp_ncpus / 6; 945 else if (cam_num_doneqs > MAXCPU) 946 cam_num_doneqs = MAXCPU; 947 for (i = 0; i < cam_num_doneqs; i++) { 948 mtx_init(&cam_doneqs[i].cam_doneq_mtx, "CAM doneq", NULL, 949 MTX_DEF); 950 STAILQ_INIT(&cam_doneqs[i].cam_doneq); 951 error = kproc_kthread_add(xpt_done_td, &cam_doneqs[i], 952 &cam_proc, NULL, 0, 0, "cam", "doneq%d", i); 953 if (error != 0) { 954 cam_num_doneqs = i; 955 break; 956 } 957 } 958 if (cam_num_doneqs < 1) { 959 printf("xpt_init: Cannot init completion queues " 960 "- failing attach\n"); 961 return (ENOMEM); 962 } 963 /* 964 * Register a callback for when interrupts are enabled. 
965 */ 966 xsoftc.xpt_config_hook = 967 (struct intr_config_hook *)malloc(sizeof(struct intr_config_hook), 968 M_CAMXPT, M_NOWAIT | M_ZERO); 969 if (xsoftc.xpt_config_hook == NULL) { 970 printf("xpt_init: Cannot malloc config hook " 971 "- failing attach\n"); 972 return (ENOMEM); 973 } 974 xsoftc.xpt_config_hook->ich_func = xpt_config; 975 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) { 976 free (xsoftc.xpt_config_hook, M_CAMXPT); 977 printf("xpt_init: config_intrhook_establish failed " 978 "- failing attach\n"); 979 } 980 981 return (0); 982 } 983 984 static cam_status 985 xptregister(struct cam_periph *periph, void *arg) 986 { 987 struct cam_sim *xpt_sim; 988 989 if (periph == NULL) { 990 printf("xptregister: periph was NULL!!\n"); 991 return(CAM_REQ_CMP_ERR); 992 } 993 994 xpt_sim = (struct cam_sim *)arg; 995 xpt_sim->softc = periph; 996 xpt_periph = periph; 997 periph->softc = NULL; 998 999 return(CAM_REQ_CMP); 1000 } 1001 1002 int32_t 1003 xpt_add_periph(struct cam_periph *periph) 1004 { 1005 struct cam_ed *device; 1006 int32_t status; 1007 1008 TASK_INIT(&periph->periph_run_task, 0, xpt_run_allocq_task, periph); 1009 device = periph->path->device; 1010 status = CAM_REQ_CMP; 1011 if (device != NULL) { 1012 mtx_lock(&device->target->bus->eb_mtx); 1013 device->generation++; 1014 SLIST_INSERT_HEAD(&device->periphs, periph, periph_links); 1015 mtx_unlock(&device->target->bus->eb_mtx); 1016 atomic_add_32(&xsoftc.xpt_generation, 1); 1017 } 1018 1019 return (status); 1020 } 1021 1022 void 1023 xpt_remove_periph(struct cam_periph *periph) 1024 { 1025 struct cam_ed *device; 1026 1027 device = periph->path->device; 1028 if (device != NULL) { 1029 mtx_lock(&device->target->bus->eb_mtx); 1030 device->generation++; 1031 SLIST_REMOVE(&device->periphs, periph, cam_periph, periph_links); 1032 mtx_unlock(&device->target->bus->eb_mtx); 1033 atomic_add_32(&xsoftc.xpt_generation, 1); 1034 } 1035 } 1036 1037 1038 void 1039 xpt_announce_periph(struct cam_periph *periph, char *announce_string) 1040 { 1041 struct cam_path *path = periph->path; 1042 struct xpt_proto *proto; 1043 1044 cam_periph_assert(periph, MA_OWNED); 1045 periph->flags |= CAM_PERIPH_ANNOUNCED; 1046 1047 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1048 periph->periph_name, periph->unit_number, 1049 path->bus->sim->sim_name, 1050 path->bus->sim->unit_number, 1051 path->bus->sim->bus_id, 1052 path->bus->path_id, 1053 path->target->target_id, 1054 (uintmax_t)path->device->lun_id); 1055 printf("%s%d: ", periph->periph_name, periph->unit_number); 1056 proto = xpt_proto_find(path->device->protocol); 1057 if (proto) 1058 proto->ops->announce(path->device); 1059 else 1060 printf("%s%d: Unknown protocol device %d\n", 1061 periph->periph_name, periph->unit_number, 1062 path->device->protocol); 1063 if (path->device->serial_num_len > 0) { 1064 /* Don't wrap the screen - print only the first 60 chars */ 1065 printf("%s%d: Serial Number %.60s\n", periph->periph_name, 1066 periph->unit_number, path->device->serial_num); 1067 } 1068 /* Announce transport details. */ 1069 path->bus->xport->ops->announce(periph); 1070 /* Announce command queueing. */ 1071 if (path->device->inq_flags & SID_CmdQue 1072 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) { 1073 printf("%s%d: Command Queueing enabled\n", 1074 periph->periph_name, periph->unit_number); 1075 } 1076 /* Announce caller's details if they've passed in. 
*/ 1077 if (announce_string != NULL) 1078 printf("%s%d: %s\n", periph->periph_name, 1079 periph->unit_number, announce_string); 1080 } 1081 1082 void 1083 xpt_announce_quirks(struct cam_periph *periph, int quirks, char *bit_string) 1084 { 1085 if (quirks != 0) { 1086 printf("%s%d: quirks=0x%b\n", periph->periph_name, 1087 periph->unit_number, quirks, bit_string); 1088 } 1089 } 1090 1091 void 1092 xpt_denounce_periph(struct cam_periph *periph) 1093 { 1094 struct cam_path *path = periph->path; 1095 struct xpt_proto *proto; 1096 1097 cam_periph_assert(periph, MA_OWNED); 1098 printf("%s%d at %s%d bus %d scbus%d target %d lun %jx\n", 1099 periph->periph_name, periph->unit_number, 1100 path->bus->sim->sim_name, 1101 path->bus->sim->unit_number, 1102 path->bus->sim->bus_id, 1103 path->bus->path_id, 1104 path->target->target_id, 1105 (uintmax_t)path->device->lun_id); 1106 printf("%s%d: ", periph->periph_name, periph->unit_number); 1107 proto = xpt_proto_find(path->device->protocol); 1108 if (proto) 1109 proto->ops->denounce(path->device); 1110 else 1111 printf("%s%d: Unknown protocol device %d\n", 1112 periph->periph_name, periph->unit_number, 1113 path->device->protocol); 1114 if (path->device->serial_num_len > 0) 1115 printf(" s/n %.60s", path->device->serial_num); 1116 printf(" detached\n"); 1117 } 1118 1119 1120 int 1121 xpt_getattr(char *buf, size_t len, const char *attr, struct cam_path *path) 1122 { 1123 int ret = -1, l, o; 1124 struct ccb_dev_advinfo cdai; 1125 struct scsi_vpd_id_descriptor *idd; 1126 1127 xpt_path_assert(path, MA_OWNED); 1128 1129 memset(&cdai, 0, sizeof(cdai)); 1130 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 1131 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 1132 cdai.flags = CDAI_FLAG_NONE; 1133 cdai.bufsiz = len; 1134 1135 if (!strcmp(attr, "GEOM::ident")) 1136 cdai.buftype = CDAI_TYPE_SERIAL_NUM; 1137 else if (!strcmp(attr, "GEOM::physpath")) 1138 cdai.buftype = CDAI_TYPE_PHYS_PATH; 1139 else if (strcmp(attr, "GEOM::lunid") == 0 || 1140 strcmp(attr, "GEOM::lunname") == 0) { 1141 cdai.buftype = CDAI_TYPE_SCSI_DEVID; 1142 cdai.bufsiz = CAM_SCSI_DEVID_MAXLEN; 1143 } else 1144 goto out; 1145 1146 cdai.buf = malloc(cdai.bufsiz, M_CAMXPT, M_NOWAIT|M_ZERO); 1147 if (cdai.buf == NULL) { 1148 ret = ENOMEM; 1149 goto out; 1150 } 1151 xpt_action((union ccb *)&cdai); /* can only be synchronous */ 1152 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 1153 cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE); 1154 if (cdai.provsiz == 0) 1155 goto out; 1156 if (cdai.buftype == CDAI_TYPE_SCSI_DEVID) { 1157 if (strcmp(attr, "GEOM::lunid") == 0) { 1158 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1159 cdai.provsiz, scsi_devid_is_lun_naa); 1160 if (idd == NULL) 1161 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1162 cdai.provsiz, scsi_devid_is_lun_eui64); 1163 if (idd == NULL) 1164 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1165 cdai.provsiz, scsi_devid_is_lun_uuid); 1166 if (idd == NULL) 1167 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1168 cdai.provsiz, scsi_devid_is_lun_md5); 1169 } else 1170 idd = NULL; 1171 if (idd == NULL) 1172 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1173 cdai.provsiz, scsi_devid_is_lun_t10); 1174 if (idd == NULL) 1175 idd = scsi_get_devid((struct scsi_vpd_device_id *)cdai.buf, 1176 cdai.provsiz, scsi_devid_is_lun_name); 1177 if (idd == NULL) 1178 goto out; 1179 ret = 0; 1180 if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_ASCII) { 1181 if (idd->length < len) { 
1182 for (l = 0; l < idd->length; l++) 1183 buf[l] = idd->identifier[l] ? 1184 idd->identifier[l] : ' '; 1185 buf[l] = 0; 1186 } else 1187 ret = EFAULT; 1188 } else if ((idd->proto_codeset & SVPD_ID_CODESET_MASK) == SVPD_ID_CODESET_UTF8) { 1189 l = strnlen(idd->identifier, idd->length); 1190 if (l < len) { 1191 bcopy(idd->identifier, buf, l); 1192 buf[l] = 0; 1193 } else 1194 ret = EFAULT; 1195 } else if ((idd->id_type & SVPD_ID_TYPE_MASK) == SVPD_ID_TYPE_UUID 1196 && idd->identifier[0] == 0x10) { 1197 if ((idd->length - 2) * 2 + 4 < len) { 1198 for (l = 2, o = 0; l < idd->length; l++) { 1199 if (l == 6 || l == 8 || l == 10 || l == 12) 1200 o += sprintf(buf + o, "-"); 1201 o += sprintf(buf + o, "%02x", 1202 idd->identifier[l]); 1203 } 1204 } else 1205 ret = EFAULT; 1206 } else { 1207 if (idd->length * 2 < len) { 1208 for (l = 0; l < idd->length; l++) 1209 sprintf(buf + l * 2, "%02x", 1210 idd->identifier[l]); 1211 } else 1212 ret = EFAULT; 1213 } 1214 } else { 1215 ret = 0; 1216 if (strlcpy(buf, cdai.buf, len) >= len) 1217 ret = EFAULT; 1218 } 1219 1220 out: 1221 if (cdai.buf != NULL) 1222 free(cdai.buf, M_CAMXPT); 1223 return ret; 1224 } 1225 1226 static dev_match_ret 1227 xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1228 struct cam_eb *bus) 1229 { 1230 dev_match_ret retval; 1231 u_int i; 1232 1233 retval = DM_RET_NONE; 1234 1235 /* 1236 * If we aren't given something to match against, that's an error. 1237 */ 1238 if (bus == NULL) 1239 return(DM_RET_ERROR); 1240 1241 /* 1242 * If there are no match entries, then this bus matches no 1243 * matter what. 1244 */ 1245 if ((patterns == NULL) || (num_patterns == 0)) 1246 return(DM_RET_DESCEND | DM_RET_COPY); 1247 1248 for (i = 0; i < num_patterns; i++) { 1249 struct bus_match_pattern *cur_pattern; 1250 1251 /* 1252 * If the pattern in question isn't for a bus node, we 1253 * aren't interested. However, we do indicate to the 1254 * calling routine that we should continue descending the 1255 * tree, since the user wants to match against lower-level 1256 * EDT elements. 1257 */ 1258 if (patterns[i].type != DEV_MATCH_BUS) { 1259 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1260 retval |= DM_RET_DESCEND; 1261 continue; 1262 } 1263 1264 cur_pattern = &patterns[i].pattern.bus_pattern; 1265 1266 /* 1267 * If they want to match any bus node, we give them any 1268 * device node. 1269 */ 1270 if (cur_pattern->flags == BUS_MATCH_ANY) { 1271 /* set the copy flag */ 1272 retval |= DM_RET_COPY; 1273 1274 /* 1275 * If we've already decided on an action, go ahead 1276 * and return. 1277 */ 1278 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE) 1279 return(retval); 1280 } 1281 1282 /* 1283 * Not sure why someone would do this... 1284 */ 1285 if (cur_pattern->flags == BUS_MATCH_NONE) 1286 continue; 1287 1288 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0) 1289 && (cur_pattern->path_id != bus->path_id)) 1290 continue; 1291 1292 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0) 1293 && (cur_pattern->bus_id != bus->sim->bus_id)) 1294 continue; 1295 1296 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0) 1297 && (cur_pattern->unit_number != bus->sim->unit_number)) 1298 continue; 1299 1300 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0) 1301 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name, 1302 DEV_IDLEN) != 0)) 1303 continue; 1304 1305 /* 1306 * If we get to this point, the user definitely wants 1307 * information on this bus. So tell the caller to copy the 1308 * data out. 
1309 */ 1310 retval |= DM_RET_COPY; 1311 1312 /* 1313 * If the return action has been set to descend, then we 1314 * know that we've already seen a non-bus matching 1315 * expression, therefore we need to further descend the tree. 1316 * This won't change by continuing around the loop, so we 1317 * go ahead and return. If we haven't seen a non-bus 1318 * matching expression, we keep going around the loop until 1319 * we exhaust the matching expressions. We'll set the stop 1320 * flag once we fall out of the loop. 1321 */ 1322 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1323 return(retval); 1324 } 1325 1326 /* 1327 * If the return action hasn't been set to descend yet, that means 1328 * we haven't seen anything other than bus matching patterns. So 1329 * tell the caller to stop descending the tree -- the user doesn't 1330 * want to match against lower level tree elements. 1331 */ 1332 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1333 retval |= DM_RET_STOP; 1334 1335 return(retval); 1336 } 1337 1338 static dev_match_ret 1339 xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns, 1340 struct cam_ed *device) 1341 { 1342 dev_match_ret retval; 1343 u_int i; 1344 1345 retval = DM_RET_NONE; 1346 1347 /* 1348 * If we aren't given something to match against, that's an error. 1349 */ 1350 if (device == NULL) 1351 return(DM_RET_ERROR); 1352 1353 /* 1354 * If there are no match entries, then this device matches no 1355 * matter what. 1356 */ 1357 if ((patterns == NULL) || (num_patterns == 0)) 1358 return(DM_RET_DESCEND | DM_RET_COPY); 1359 1360 for (i = 0; i < num_patterns; i++) { 1361 struct device_match_pattern *cur_pattern; 1362 struct scsi_vpd_device_id *device_id_page; 1363 1364 /* 1365 * If the pattern in question isn't for a device node, we 1366 * aren't interested. 1367 */ 1368 if (patterns[i].type != DEV_MATCH_DEVICE) { 1369 if ((patterns[i].type == DEV_MATCH_PERIPH) 1370 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)) 1371 retval |= DM_RET_DESCEND; 1372 continue; 1373 } 1374 1375 cur_pattern = &patterns[i].pattern.device_pattern; 1376 1377 /* Error out if mutually exclusive options are specified. */ 1378 if ((cur_pattern->flags & (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1379 == (DEV_MATCH_INQUIRY|DEV_MATCH_DEVID)) 1380 return(DM_RET_ERROR); 1381 1382 /* 1383 * If they want to match any device node, we give them any 1384 * device node. 1385 */ 1386 if (cur_pattern->flags == DEV_MATCH_ANY) 1387 goto copy_dev_node; 1388 1389 /* 1390 * Not sure why someone would do this... 
1391 */ 1392 if (cur_pattern->flags == DEV_MATCH_NONE) 1393 continue; 1394 1395 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0) 1396 && (cur_pattern->path_id != device->target->bus->path_id)) 1397 continue; 1398 1399 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0) 1400 && (cur_pattern->target_id != device->target->target_id)) 1401 continue; 1402 1403 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0) 1404 && (cur_pattern->target_lun != device->lun_id)) 1405 continue; 1406 1407 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0) 1408 && (cam_quirkmatch((caddr_t)&device->inq_data, 1409 (caddr_t)&cur_pattern->data.inq_pat, 1410 1, sizeof(cur_pattern->data.inq_pat), 1411 scsi_static_inquiry_match) == NULL)) 1412 continue; 1413 1414 device_id_page = (struct scsi_vpd_device_id *)device->device_id; 1415 if (((cur_pattern->flags & DEV_MATCH_DEVID) != 0) 1416 && (device->device_id_len < SVPD_DEVICE_ID_HDR_LEN 1417 || scsi_devid_match((uint8_t *)device_id_page->desc_list, 1418 device->device_id_len 1419 - SVPD_DEVICE_ID_HDR_LEN, 1420 cur_pattern->data.devid_pat.id, 1421 cur_pattern->data.devid_pat.id_len) != 0)) 1422 continue; 1423 1424 copy_dev_node: 1425 /* 1426 * If we get to this point, the user definitely wants 1427 * information on this device. So tell the caller to copy 1428 * the data out. 1429 */ 1430 retval |= DM_RET_COPY; 1431 1432 /* 1433 * If the return action has been set to descend, then we 1434 * know that we've already seen a peripheral matching 1435 * expression, therefore we need to further descend the tree. 1436 * This won't change by continuing around the loop, so we 1437 * go ahead and return. If we haven't seen a peripheral 1438 * matching expression, we keep going around the loop until 1439 * we exhaust the matching expressions. We'll set the stop 1440 * flag once we fall out of the loop. 1441 */ 1442 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND) 1443 return(retval); 1444 } 1445 1446 /* 1447 * If the return action hasn't been set to descend yet, that means 1448 * we haven't seen any peripheral matching patterns. So tell the 1449 * caller to stop descending the tree -- the user doesn't want to 1450 * match against lower level tree elements. 1451 */ 1452 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE) 1453 retval |= DM_RET_STOP; 1454 1455 return(retval); 1456 } 1457 1458 /* 1459 * Match a single peripheral against any number of match patterns. 1460 */ 1461 static dev_match_ret 1462 xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns, 1463 struct cam_periph *periph) 1464 { 1465 dev_match_ret retval; 1466 u_int i; 1467 1468 /* 1469 * If we aren't given something to match against, that's an error. 1470 */ 1471 if (periph == NULL) 1472 return(DM_RET_ERROR); 1473 1474 /* 1475 * If there are no match entries, then this peripheral matches no 1476 * matter what. 1477 */ 1478 if ((patterns == NULL) || (num_patterns == 0)) 1479 return(DM_RET_STOP | DM_RET_COPY); 1480 1481 /* 1482 * There aren't any nodes below a peripheral node, so there's no 1483 * reason to descend the tree any further. 1484 */ 1485 retval = DM_RET_STOP; 1486 1487 for (i = 0; i < num_patterns; i++) { 1488 struct periph_match_pattern *cur_pattern; 1489 1490 /* 1491 * If the pattern in question isn't for a peripheral, we 1492 * aren't interested. 1493 */ 1494 if (patterns[i].type != DEV_MATCH_PERIPH) 1495 continue; 1496 1497 cur_pattern = &patterns[i].pattern.periph_pattern; 1498 1499 /* 1500 * If they want to match on anything, then we will do so. 
1501 */ 1502 if (cur_pattern->flags == PERIPH_MATCH_ANY) { 1503 /* set the copy flag */ 1504 retval |= DM_RET_COPY; 1505 1506 /* 1507 * We've already set the return action to stop, 1508 * since there are no nodes below peripherals in 1509 * the tree. 1510 */ 1511 return(retval); 1512 } 1513 1514 /* 1515 * Not sure why someone would do this... 1516 */ 1517 if (cur_pattern->flags == PERIPH_MATCH_NONE) 1518 continue; 1519 1520 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0) 1521 && (cur_pattern->path_id != periph->path->bus->path_id)) 1522 continue; 1523 1524 /* 1525 * For the target and lun id's, we have to make sure the 1526 * target and lun pointers aren't NULL. The xpt peripheral 1527 * has a wildcard target and device. 1528 */ 1529 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0) 1530 && ((periph->path->target == NULL) 1531 ||(cur_pattern->target_id != periph->path->target->target_id))) 1532 continue; 1533 1534 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0) 1535 && ((periph->path->device == NULL) 1536 || (cur_pattern->target_lun != periph->path->device->lun_id))) 1537 continue; 1538 1539 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0) 1540 && (cur_pattern->unit_number != periph->unit_number)) 1541 continue; 1542 1543 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0) 1544 && (strncmp(cur_pattern->periph_name, periph->periph_name, 1545 DEV_IDLEN) != 0)) 1546 continue; 1547 1548 /* 1549 * If we get to this point, the user definitely wants 1550 * information on this peripheral. So tell the caller to 1551 * copy the data out. 1552 */ 1553 retval |= DM_RET_COPY; 1554 1555 /* 1556 * The return action has already been set to stop, since 1557 * peripherals don't have any nodes below them in the EDT. 1558 */ 1559 return(retval); 1560 } 1561 1562 /* 1563 * If we get to this point, the peripheral that was passed in 1564 * doesn't match any of the patterns. 1565 */ 1566 return(retval); 1567 } 1568 1569 static int 1570 xptedtbusfunc(struct cam_eb *bus, void *arg) 1571 { 1572 struct ccb_dev_match *cdm; 1573 struct cam_et *target; 1574 dev_match_ret retval; 1575 1576 cdm = (struct ccb_dev_match *)arg; 1577 1578 /* 1579 * If our position is for something deeper in the tree, that means 1580 * that we've already seen this node. So, we keep going down. 1581 */ 1582 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1583 && (cdm->pos.cookie.bus == bus) 1584 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1585 && (cdm->pos.cookie.target != NULL)) 1586 retval = DM_RET_DESCEND; 1587 else 1588 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus); 1589 1590 /* 1591 * If we got an error, bail out of the search. 1592 */ 1593 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1594 cdm->status = CAM_DEV_MATCH_ERROR; 1595 return(0); 1596 } 1597 1598 /* 1599 * If the copy flag is set, copy this bus out. 1600 */ 1601 if (retval & DM_RET_COPY) { 1602 int spaceleft, j; 1603 1604 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1605 sizeof(struct dev_match_result)); 1606 1607 /* 1608 * If we don't have enough space to put in another 1609 * match result, save our position and tell the 1610 * user there are more devices to check. 
1611 */ 1612 if (spaceleft < sizeof(struct dev_match_result)) { 1613 bzero(&cdm->pos, sizeof(cdm->pos)); 1614 cdm->pos.position_type = 1615 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS; 1616 1617 cdm->pos.cookie.bus = bus; 1618 cdm->pos.generations[CAM_BUS_GENERATION]= 1619 xsoftc.bus_generation; 1620 cdm->status = CAM_DEV_MATCH_MORE; 1621 return(0); 1622 } 1623 j = cdm->num_matches; 1624 cdm->num_matches++; 1625 cdm->matches[j].type = DEV_MATCH_BUS; 1626 cdm->matches[j].result.bus_result.path_id = bus->path_id; 1627 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id; 1628 cdm->matches[j].result.bus_result.unit_number = 1629 bus->sim->unit_number; 1630 strncpy(cdm->matches[j].result.bus_result.dev_name, 1631 bus->sim->sim_name, DEV_IDLEN); 1632 } 1633 1634 /* 1635 * If the user is only interested in buses, there's no 1636 * reason to descend to the next level in the tree. 1637 */ 1638 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1639 return(1); 1640 1641 /* 1642 * If there is a target generation recorded, check it to 1643 * make sure the target list hasn't changed. 1644 */ 1645 mtx_lock(&bus->eb_mtx); 1646 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1647 && (cdm->pos.cookie.bus == bus) 1648 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1649 && (cdm->pos.cookie.target != NULL)) { 1650 if ((cdm->pos.generations[CAM_TARGET_GENERATION] != 1651 bus->generation)) { 1652 mtx_unlock(&bus->eb_mtx); 1653 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1654 return (0); 1655 } 1656 target = (struct cam_et *)cdm->pos.cookie.target; 1657 target->refcount++; 1658 } else 1659 target = NULL; 1660 mtx_unlock(&bus->eb_mtx); 1661 1662 return (xpttargettraverse(bus, target, xptedttargetfunc, arg)); 1663 } 1664 1665 static int 1666 xptedttargetfunc(struct cam_et *target, void *arg) 1667 { 1668 struct ccb_dev_match *cdm; 1669 struct cam_eb *bus; 1670 struct cam_ed *device; 1671 1672 cdm = (struct ccb_dev_match *)arg; 1673 bus = target->bus; 1674 1675 /* 1676 * If there is a device list generation recorded, check it to 1677 * make sure the device list hasn't changed. 1678 */ 1679 mtx_lock(&bus->eb_mtx); 1680 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1681 && (cdm->pos.cookie.bus == bus) 1682 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1683 && (cdm->pos.cookie.target == target) 1684 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1685 && (cdm->pos.cookie.device != NULL)) { 1686 if (cdm->pos.generations[CAM_DEV_GENERATION] != 1687 target->generation) { 1688 mtx_unlock(&bus->eb_mtx); 1689 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1690 return(0); 1691 } 1692 device = (struct cam_ed *)cdm->pos.cookie.device; 1693 device->refcount++; 1694 } else 1695 device = NULL; 1696 mtx_unlock(&bus->eb_mtx); 1697 1698 return (xptdevicetraverse(target, device, xptedtdevicefunc, arg)); 1699 } 1700 1701 static int 1702 xptedtdevicefunc(struct cam_ed *device, void *arg) 1703 { 1704 struct cam_eb *bus; 1705 struct cam_periph *periph; 1706 struct ccb_dev_match *cdm; 1707 dev_match_ret retval; 1708 1709 cdm = (struct ccb_dev_match *)arg; 1710 bus = device->target->bus; 1711 1712 /* 1713 * If our position is for something deeper in the tree, that means 1714 * that we've already seen this node. So, we keep going down. 
1715 */ 1716 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1717 && (cdm->pos.cookie.device == device) 1718 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1719 && (cdm->pos.cookie.periph != NULL)) 1720 retval = DM_RET_DESCEND; 1721 else 1722 retval = xptdevicematch(cdm->patterns, cdm->num_patterns, 1723 device); 1724 1725 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1726 cdm->status = CAM_DEV_MATCH_ERROR; 1727 return(0); 1728 } 1729 1730 /* 1731 * If the copy flag is set, copy this device out. 1732 */ 1733 if (retval & DM_RET_COPY) { 1734 int spaceleft, j; 1735 1736 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1737 sizeof(struct dev_match_result)); 1738 1739 /* 1740 * If we don't have enough space to put in another 1741 * match result, save our position and tell the 1742 * user there are more devices to check. 1743 */ 1744 if (spaceleft < sizeof(struct dev_match_result)) { 1745 bzero(&cdm->pos, sizeof(cdm->pos)); 1746 cdm->pos.position_type = 1747 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1748 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE; 1749 1750 cdm->pos.cookie.bus = device->target->bus; 1751 cdm->pos.generations[CAM_BUS_GENERATION]= 1752 xsoftc.bus_generation; 1753 cdm->pos.cookie.target = device->target; 1754 cdm->pos.generations[CAM_TARGET_GENERATION] = 1755 device->target->bus->generation; 1756 cdm->pos.cookie.device = device; 1757 cdm->pos.generations[CAM_DEV_GENERATION] = 1758 device->target->generation; 1759 cdm->status = CAM_DEV_MATCH_MORE; 1760 return(0); 1761 } 1762 j = cdm->num_matches; 1763 cdm->num_matches++; 1764 cdm->matches[j].type = DEV_MATCH_DEVICE; 1765 cdm->matches[j].result.device_result.path_id = 1766 device->target->bus->path_id; 1767 cdm->matches[j].result.device_result.target_id = 1768 device->target->target_id; 1769 cdm->matches[j].result.device_result.target_lun = 1770 device->lun_id; 1771 cdm->matches[j].result.device_result.protocol = 1772 device->protocol; 1773 bcopy(&device->inq_data, 1774 &cdm->matches[j].result.device_result.inq_data, 1775 sizeof(struct scsi_inquiry_data)); 1776 bcopy(&device->ident_data, 1777 &cdm->matches[j].result.device_result.ident_data, 1778 sizeof(struct ata_params)); 1779 1780 /* Let the user know whether this device is unconfigured */ 1781 if (device->flags & CAM_DEV_UNCONFIGURED) 1782 cdm->matches[j].result.device_result.flags = 1783 DEV_RESULT_UNCONFIGURED; 1784 else 1785 cdm->matches[j].result.device_result.flags = 1786 DEV_RESULT_NOFLAG; 1787 } 1788 1789 /* 1790 * If the user isn't interested in peripherals, don't descend 1791 * the tree any further. 1792 */ 1793 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP) 1794 return(1); 1795 1796 /* 1797 * If there is a peripheral list generation recorded, make sure 1798 * it hasn't changed. 
1799 */ 1800 xpt_lock_buses(); 1801 mtx_lock(&bus->eb_mtx); 1802 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1803 && (cdm->pos.cookie.bus == bus) 1804 && (cdm->pos.position_type & CAM_DEV_POS_TARGET) 1805 && (cdm->pos.cookie.target == device->target) 1806 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE) 1807 && (cdm->pos.cookie.device == device) 1808 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1809 && (cdm->pos.cookie.periph != NULL)) { 1810 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1811 device->generation) { 1812 mtx_unlock(&bus->eb_mtx); 1813 xpt_unlock_buses(); 1814 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1815 return(0); 1816 } 1817 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1818 periph->refcount++; 1819 } else 1820 periph = NULL; 1821 mtx_unlock(&bus->eb_mtx); 1822 xpt_unlock_buses(); 1823 1824 return (xptperiphtraverse(device, periph, xptedtperiphfunc, arg)); 1825 } 1826 1827 static int 1828 xptedtperiphfunc(struct cam_periph *periph, void *arg) 1829 { 1830 struct ccb_dev_match *cdm; 1831 dev_match_ret retval; 1832 1833 cdm = (struct ccb_dev_match *)arg; 1834 1835 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1836 1837 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1838 cdm->status = CAM_DEV_MATCH_ERROR; 1839 return(0); 1840 } 1841 1842 /* 1843 * If the copy flag is set, copy this peripheral out. 1844 */ 1845 if (retval & DM_RET_COPY) { 1846 int spaceleft, j; 1847 1848 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1849 sizeof(struct dev_match_result)); 1850 1851 /* 1852 * If we don't have enough space to put in another 1853 * match result, save our position and tell the 1854 * user there are more devices to check. 1855 */ 1856 if (spaceleft < sizeof(struct dev_match_result)) { 1857 bzero(&cdm->pos, sizeof(cdm->pos)); 1858 cdm->pos.position_type = 1859 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS | 1860 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE | 1861 CAM_DEV_POS_PERIPH; 1862 1863 cdm->pos.cookie.bus = periph->path->bus; 1864 cdm->pos.generations[CAM_BUS_GENERATION]= 1865 xsoftc.bus_generation; 1866 cdm->pos.cookie.target = periph->path->target; 1867 cdm->pos.generations[CAM_TARGET_GENERATION] = 1868 periph->path->bus->generation; 1869 cdm->pos.cookie.device = periph->path->device; 1870 cdm->pos.generations[CAM_DEV_GENERATION] = 1871 periph->path->target->generation; 1872 cdm->pos.cookie.periph = periph; 1873 cdm->pos.generations[CAM_PERIPH_GENERATION] = 1874 periph->path->device->generation; 1875 cdm->status = CAM_DEV_MATCH_MORE; 1876 return(0); 1877 } 1878 1879 j = cdm->num_matches; 1880 cdm->num_matches++; 1881 cdm->matches[j].type = DEV_MATCH_PERIPH; 1882 cdm->matches[j].result.periph_result.path_id = 1883 periph->path->bus->path_id; 1884 cdm->matches[j].result.periph_result.target_id = 1885 periph->path->target->target_id; 1886 cdm->matches[j].result.periph_result.target_lun = 1887 periph->path->device->lun_id; 1888 cdm->matches[j].result.periph_result.unit_number = 1889 periph->unit_number; 1890 strncpy(cdm->matches[j].result.periph_result.periph_name, 1891 periph->periph_name, DEV_IDLEN); 1892 } 1893 1894 return(1); 1895 } 1896 1897 static int 1898 xptedtmatch(struct ccb_dev_match *cdm) 1899 { 1900 struct cam_eb *bus; 1901 int ret; 1902 1903 cdm->num_matches = 0; 1904 1905 /* 1906 * Check the bus list generation. If it has changed, the user 1907 * needs to reset everything and start over. 
1908 */ 1909 xpt_lock_buses(); 1910 if ((cdm->pos.position_type & CAM_DEV_POS_BUS) 1911 && (cdm->pos.cookie.bus != NULL)) { 1912 if (cdm->pos.generations[CAM_BUS_GENERATION] != 1913 xsoftc.bus_generation) { 1914 xpt_unlock_buses(); 1915 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1916 return(0); 1917 } 1918 bus = (struct cam_eb *)cdm->pos.cookie.bus; 1919 bus->refcount++; 1920 } else 1921 bus = NULL; 1922 xpt_unlock_buses(); 1923 1924 ret = xptbustraverse(bus, xptedtbusfunc, cdm); 1925 1926 /* 1927 * If we get back 0, that means that we had to stop before fully 1928 * traversing the EDT. It also means that one of the subroutines 1929 * has set the status field to the proper value. If we get back 1, 1930 * we've fully traversed the EDT and copied out any matching entries. 1931 */ 1932 if (ret == 1) 1933 cdm->status = CAM_DEV_MATCH_LAST; 1934 1935 return(ret); 1936 } 1937 1938 static int 1939 xptplistpdrvfunc(struct periph_driver **pdrv, void *arg) 1940 { 1941 struct cam_periph *periph; 1942 struct ccb_dev_match *cdm; 1943 1944 cdm = (struct ccb_dev_match *)arg; 1945 1946 xpt_lock_buses(); 1947 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 1948 && (cdm->pos.cookie.pdrv == pdrv) 1949 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH) 1950 && (cdm->pos.cookie.periph != NULL)) { 1951 if (cdm->pos.generations[CAM_PERIPH_GENERATION] != 1952 (*pdrv)->generation) { 1953 xpt_unlock_buses(); 1954 cdm->status = CAM_DEV_MATCH_LIST_CHANGED; 1955 return(0); 1956 } 1957 periph = (struct cam_periph *)cdm->pos.cookie.periph; 1958 periph->refcount++; 1959 } else 1960 periph = NULL; 1961 xpt_unlock_buses(); 1962 1963 return (xptpdperiphtraverse(pdrv, periph, xptplistperiphfunc, arg)); 1964 } 1965 1966 static int 1967 xptplistperiphfunc(struct cam_periph *periph, void *arg) 1968 { 1969 struct ccb_dev_match *cdm; 1970 dev_match_ret retval; 1971 1972 cdm = (struct ccb_dev_match *)arg; 1973 1974 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph); 1975 1976 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) { 1977 cdm->status = CAM_DEV_MATCH_ERROR; 1978 return(0); 1979 } 1980 1981 /* 1982 * If the copy flag is set, copy this peripheral out. 1983 */ 1984 if (retval & DM_RET_COPY) { 1985 int spaceleft, j; 1986 1987 spaceleft = cdm->match_buf_len - (cdm->num_matches * 1988 sizeof(struct dev_match_result)); 1989 1990 /* 1991 * If we don't have enough space to put in another 1992 * match result, save our position and tell the 1993 * user there are more devices to check. 1994 */ 1995 if (spaceleft < sizeof(struct dev_match_result)) { 1996 struct periph_driver **pdrv; 1997 1998 pdrv = NULL; 1999 bzero(&cdm->pos, sizeof(cdm->pos)); 2000 cdm->pos.position_type = 2001 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR | 2002 CAM_DEV_POS_PERIPH; 2003 2004 /* 2005 * This may look a bit non-sensical, but it is 2006 * actually quite logical. There are very few 2007 * peripheral drivers, and bloating every peripheral 2008 * structure with a pointer back to its parent 2009 * peripheral driver linker set entry would cost 2010 * more in the long run than doing this quick lookup. 2011 */ 2012 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) { 2013 if (strcmp((*pdrv)->driver_name, 2014 periph->periph_name) == 0) 2015 break; 2016 } 2017 2018 if (*pdrv == NULL) { 2019 cdm->status = CAM_DEV_MATCH_ERROR; 2020 return(0); 2021 } 2022 2023 cdm->pos.cookie.pdrv = pdrv; 2024 /* 2025 * The periph generation slot does double duty, as 2026 * does the periph pointer slot. They are used for 2027 * both edt and pdrv lookups and positioning. 
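 * For this pdrv-style traversal the cookie records the linker set
 * entry located above together with (*pdrv)->generation, so that the
 * next XPT_DEV_MATCH call can resume scanning that driver's unit
 * list where this buffer filled up.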
2028 */ 2029 cdm->pos.cookie.periph = periph; 2030 cdm->pos.generations[CAM_PERIPH_GENERATION] = 2031 (*pdrv)->generation; 2032 cdm->status = CAM_DEV_MATCH_MORE; 2033 return(0); 2034 } 2035 2036 j = cdm->num_matches; 2037 cdm->num_matches++; 2038 cdm->matches[j].type = DEV_MATCH_PERIPH; 2039 cdm->matches[j].result.periph_result.path_id = 2040 periph->path->bus->path_id; 2041 2042 /* 2043 * The transport layer peripheral doesn't have a target or 2044 * lun. 2045 */ 2046 if (periph->path->target) 2047 cdm->matches[j].result.periph_result.target_id = 2048 periph->path->target->target_id; 2049 else 2050 cdm->matches[j].result.periph_result.target_id = 2051 CAM_TARGET_WILDCARD; 2052 2053 if (periph->path->device) 2054 cdm->matches[j].result.periph_result.target_lun = 2055 periph->path->device->lun_id; 2056 else 2057 cdm->matches[j].result.periph_result.target_lun = 2058 CAM_LUN_WILDCARD; 2059 2060 cdm->matches[j].result.periph_result.unit_number = 2061 periph->unit_number; 2062 strncpy(cdm->matches[j].result.periph_result.periph_name, 2063 periph->periph_name, DEV_IDLEN); 2064 } 2065 2066 return(1); 2067 } 2068 2069 static int 2070 xptperiphlistmatch(struct ccb_dev_match *cdm) 2071 { 2072 int ret; 2073 2074 cdm->num_matches = 0; 2075 2076 /* 2077 * At this point in the edt traversal function, we check the bus 2078 * list generation to make sure that no buses have been added or 2079 * removed since the user last sent a XPT_DEV_MATCH ccb through. 2080 * For the peripheral driver list traversal function, however, we 2081 * don't have to worry about new peripheral driver types coming or 2082 * going; they're in a linker set, and therefore can't change 2083 * without a recompile. 2084 */ 2085 2086 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR) 2087 && (cdm->pos.cookie.pdrv != NULL)) 2088 ret = xptpdrvtraverse( 2089 (struct periph_driver **)cdm->pos.cookie.pdrv, 2090 xptplistpdrvfunc, cdm); 2091 else 2092 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm); 2093 2094 /* 2095 * If we get back 0, that means that we had to stop before fully 2096 * traversing the peripheral driver tree. It also means that one of 2097 * the subroutines has set the status field to the proper value. If 2098 * we get back 1, we've fully traversed the EDT and copied out any 2099 * matching entries. 
2100 */ 2101 if (ret == 1) 2102 cdm->status = CAM_DEV_MATCH_LAST; 2103 2104 return(ret); 2105 } 2106 2107 static int 2108 xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg) 2109 { 2110 struct cam_eb *bus, *next_bus; 2111 int retval; 2112 2113 retval = 1; 2114 if (start_bus) 2115 bus = start_bus; 2116 else { 2117 xpt_lock_buses(); 2118 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 2119 if (bus == NULL) { 2120 xpt_unlock_buses(); 2121 return (retval); 2122 } 2123 bus->refcount++; 2124 xpt_unlock_buses(); 2125 } 2126 for (; bus != NULL; bus = next_bus) { 2127 retval = tr_func(bus, arg); 2128 if (retval == 0) { 2129 xpt_release_bus(bus); 2130 break; 2131 } 2132 xpt_lock_buses(); 2133 next_bus = TAILQ_NEXT(bus, links); 2134 if (next_bus) 2135 next_bus->refcount++; 2136 xpt_unlock_buses(); 2137 xpt_release_bus(bus); 2138 } 2139 return(retval); 2140 } 2141 2142 static int 2143 xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target, 2144 xpt_targetfunc_t *tr_func, void *arg) 2145 { 2146 struct cam_et *target, *next_target; 2147 int retval; 2148 2149 retval = 1; 2150 if (start_target) 2151 target = start_target; 2152 else { 2153 mtx_lock(&bus->eb_mtx); 2154 target = TAILQ_FIRST(&bus->et_entries); 2155 if (target == NULL) { 2156 mtx_unlock(&bus->eb_mtx); 2157 return (retval); 2158 } 2159 target->refcount++; 2160 mtx_unlock(&bus->eb_mtx); 2161 } 2162 for (; target != NULL; target = next_target) { 2163 retval = tr_func(target, arg); 2164 if (retval == 0) { 2165 xpt_release_target(target); 2166 break; 2167 } 2168 mtx_lock(&bus->eb_mtx); 2169 next_target = TAILQ_NEXT(target, links); 2170 if (next_target) 2171 next_target->refcount++; 2172 mtx_unlock(&bus->eb_mtx); 2173 xpt_release_target(target); 2174 } 2175 return(retval); 2176 } 2177 2178 static int 2179 xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device, 2180 xpt_devicefunc_t *tr_func, void *arg) 2181 { 2182 struct cam_eb *bus; 2183 struct cam_ed *device, *next_device; 2184 int retval; 2185 2186 retval = 1; 2187 bus = target->bus; 2188 if (start_device) 2189 device = start_device; 2190 else { 2191 mtx_lock(&bus->eb_mtx); 2192 device = TAILQ_FIRST(&target->ed_entries); 2193 if (device == NULL) { 2194 mtx_unlock(&bus->eb_mtx); 2195 return (retval); 2196 } 2197 device->refcount++; 2198 mtx_unlock(&bus->eb_mtx); 2199 } 2200 for (; device != NULL; device = next_device) { 2201 mtx_lock(&device->device_mtx); 2202 retval = tr_func(device, arg); 2203 mtx_unlock(&device->device_mtx); 2204 if (retval == 0) { 2205 xpt_release_device(device); 2206 break; 2207 } 2208 mtx_lock(&bus->eb_mtx); 2209 next_device = TAILQ_NEXT(device, links); 2210 if (next_device) 2211 next_device->refcount++; 2212 mtx_unlock(&bus->eb_mtx); 2213 xpt_release_device(device); 2214 } 2215 return(retval); 2216 } 2217 2218 static int 2219 xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph, 2220 xpt_periphfunc_t *tr_func, void *arg) 2221 { 2222 struct cam_eb *bus; 2223 struct cam_periph *periph, *next_periph; 2224 int retval; 2225 2226 retval = 1; 2227 2228 bus = device->target->bus; 2229 if (start_periph) 2230 periph = start_periph; 2231 else { 2232 xpt_lock_buses(); 2233 mtx_lock(&bus->eb_mtx); 2234 periph = SLIST_FIRST(&device->periphs); 2235 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2236 periph = SLIST_NEXT(periph, periph_links); 2237 if (periph == NULL) { 2238 mtx_unlock(&bus->eb_mtx); 2239 xpt_unlock_buses(); 2240 return (retval); 2241 } 2242 periph->refcount++; 2243 mtx_unlock(&bus->eb_mtx); 2244 
xpt_unlock_buses(); 2245 } 2246 for (; periph != NULL; periph = next_periph) { 2247 retval = tr_func(periph, arg); 2248 if (retval == 0) { 2249 cam_periph_release_locked(periph); 2250 break; 2251 } 2252 xpt_lock_buses(); 2253 mtx_lock(&bus->eb_mtx); 2254 next_periph = SLIST_NEXT(periph, periph_links); 2255 while (next_periph != NULL && 2256 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2257 next_periph = SLIST_NEXT(next_periph, periph_links); 2258 if (next_periph) 2259 next_periph->refcount++; 2260 mtx_unlock(&bus->eb_mtx); 2261 xpt_unlock_buses(); 2262 cam_periph_release_locked(periph); 2263 } 2264 return(retval); 2265 } 2266 2267 static int 2268 xptpdrvtraverse(struct periph_driver **start_pdrv, 2269 xpt_pdrvfunc_t *tr_func, void *arg) 2270 { 2271 struct periph_driver **pdrv; 2272 int retval; 2273 2274 retval = 1; 2275 2276 /* 2277 * We don't traverse the peripheral driver list like we do the 2278 * other lists, because it is a linker set, and therefore cannot be 2279 * changed during runtime. If the peripheral driver list is ever 2280 * re-done to be something other than a linker set (i.e. it can 2281 * change while the system is running), the list traversal should 2282 * be modified to work like the other traversal functions. 2283 */ 2284 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers); 2285 *pdrv != NULL; pdrv++) { 2286 retval = tr_func(pdrv, arg); 2287 2288 if (retval == 0) 2289 return(retval); 2290 } 2291 2292 return(retval); 2293 } 2294 2295 static int 2296 xptpdperiphtraverse(struct periph_driver **pdrv, 2297 struct cam_periph *start_periph, 2298 xpt_periphfunc_t *tr_func, void *arg) 2299 { 2300 struct cam_periph *periph, *next_periph; 2301 int retval; 2302 2303 retval = 1; 2304 2305 if (start_periph) 2306 periph = start_periph; 2307 else { 2308 xpt_lock_buses(); 2309 periph = TAILQ_FIRST(&(*pdrv)->units); 2310 while (periph != NULL && (periph->flags & CAM_PERIPH_FREE) != 0) 2311 periph = TAILQ_NEXT(periph, unit_links); 2312 if (periph == NULL) { 2313 xpt_unlock_buses(); 2314 return (retval); 2315 } 2316 periph->refcount++; 2317 xpt_unlock_buses(); 2318 } 2319 for (; periph != NULL; periph = next_periph) { 2320 cam_periph_lock(periph); 2321 retval = tr_func(periph, arg); 2322 cam_periph_unlock(periph); 2323 if (retval == 0) { 2324 cam_periph_release(periph); 2325 break; 2326 } 2327 xpt_lock_buses(); 2328 next_periph = TAILQ_NEXT(periph, unit_links); 2329 while (next_periph != NULL && 2330 (next_periph->flags & CAM_PERIPH_FREE) != 0) 2331 next_periph = TAILQ_NEXT(next_periph, unit_links); 2332 if (next_periph) 2333 next_periph->refcount++; 2334 xpt_unlock_buses(); 2335 cam_periph_release(periph); 2336 } 2337 return(retval); 2338 } 2339 2340 static int 2341 xptdefbusfunc(struct cam_eb *bus, void *arg) 2342 { 2343 struct xpt_traverse_config *tr_config; 2344 2345 tr_config = (struct xpt_traverse_config *)arg; 2346 2347 if (tr_config->depth == XPT_DEPTH_BUS) { 2348 xpt_busfunc_t *tr_func; 2349 2350 tr_func = (xpt_busfunc_t *)tr_config->tr_func; 2351 2352 return(tr_func(bus, tr_config->tr_arg)); 2353 } else 2354 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg)); 2355 } 2356 2357 static int 2358 xptdeftargetfunc(struct cam_et *target, void *arg) 2359 { 2360 struct xpt_traverse_config *tr_config; 2361 2362 tr_config = (struct xpt_traverse_config *)arg; 2363 2364 if (tr_config->depth == XPT_DEPTH_TARGET) { 2365 xpt_targetfunc_t *tr_func; 2366 2367 tr_func = (xpt_targetfunc_t *)tr_config->tr_func; 2368 2369 return(tr_func(target, tr_config->tr_arg)); 2370 } else 2371 
return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg)); 2372 } 2373 2374 static int 2375 xptdefdevicefunc(struct cam_ed *device, void *arg) 2376 { 2377 struct xpt_traverse_config *tr_config; 2378 2379 tr_config = (struct xpt_traverse_config *)arg; 2380 2381 if (tr_config->depth == XPT_DEPTH_DEVICE) { 2382 xpt_devicefunc_t *tr_func; 2383 2384 tr_func = (xpt_devicefunc_t *)tr_config->tr_func; 2385 2386 return(tr_func(device, tr_config->tr_arg)); 2387 } else 2388 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg)); 2389 } 2390 2391 static int 2392 xptdefperiphfunc(struct cam_periph *periph, void *arg) 2393 { 2394 struct xpt_traverse_config *tr_config; 2395 xpt_periphfunc_t *tr_func; 2396 2397 tr_config = (struct xpt_traverse_config *)arg; 2398 2399 tr_func = (xpt_periphfunc_t *)tr_config->tr_func; 2400 2401 /* 2402 * Unlike the other default functions, we don't check for depth 2403 * here. The peripheral driver level is the last level in the EDT, 2404 * so if we're here, we should execute the function in question. 2405 */ 2406 return(tr_func(periph, tr_config->tr_arg)); 2407 } 2408 2409 /* 2410 * Execute the given function for every bus in the EDT. 2411 */ 2412 static int 2413 xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg) 2414 { 2415 struct xpt_traverse_config tr_config; 2416 2417 tr_config.depth = XPT_DEPTH_BUS; 2418 tr_config.tr_func = tr_func; 2419 tr_config.tr_arg = arg; 2420 2421 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2422 } 2423 2424 /* 2425 * Execute the given function for every device in the EDT. 2426 */ 2427 static int 2428 xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg) 2429 { 2430 struct xpt_traverse_config tr_config; 2431 2432 tr_config.depth = XPT_DEPTH_DEVICE; 2433 tr_config.tr_func = tr_func; 2434 tr_config.tr_arg = arg; 2435 2436 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config)); 2437 } 2438 2439 static int 2440 xptsetasyncfunc(struct cam_ed *device, void *arg) 2441 { 2442 struct cam_path path; 2443 struct ccb_getdev cgd; 2444 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2445 2446 /* 2447 * Don't report unconfigured devices (Wildcard devs, 2448 * devices only for target mode, device instances 2449 * that have been invalidated but are waiting for 2450 * their last reference count to be released). 
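 * For every device that passes this filter, a temporary path is
 * compiled, an XPT_GDEV_TYPE CCB is issued on it, and the result is
 * handed to the caller's callback as an AC_FOUND_DEVICE event.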
2451 */ 2452 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0) 2453 return (1); 2454 2455 xpt_compile_path(&path, 2456 NULL, 2457 device->target->bus->path_id, 2458 device->target->target_id, 2459 device->lun_id); 2460 xpt_setup_ccb(&cgd.ccb_h, &path, CAM_PRIORITY_NORMAL); 2461 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 2462 xpt_action((union ccb *)&cgd); 2463 csa->callback(csa->callback_arg, 2464 AC_FOUND_DEVICE, 2465 &path, &cgd); 2466 xpt_release_path(&path); 2467 2468 return(1); 2469 } 2470 2471 static int 2472 xptsetasyncbusfunc(struct cam_eb *bus, void *arg) 2473 { 2474 struct cam_path path; 2475 struct ccb_pathinq cpi; 2476 struct ccb_setasync *csa = (struct ccb_setasync *)arg; 2477 2478 xpt_compile_path(&path, /*periph*/NULL, 2479 bus->path_id, 2480 CAM_TARGET_WILDCARD, 2481 CAM_LUN_WILDCARD); 2482 xpt_path_lock(&path); 2483 xpt_setup_ccb(&cpi.ccb_h, &path, CAM_PRIORITY_NORMAL); 2484 cpi.ccb_h.func_code = XPT_PATH_INQ; 2485 xpt_action((union ccb *)&cpi); 2486 csa->callback(csa->callback_arg, 2487 AC_PATH_REGISTERED, 2488 &path, &cpi); 2489 xpt_path_unlock(&path); 2490 xpt_release_path(&path); 2491 2492 return(1); 2493 } 2494 2495 void 2496 xpt_action(union ccb *start_ccb) 2497 { 2498 2499 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, 2500 ("xpt_action: func %#x %s\n", start_ccb->ccb_h.func_code, 2501 xpt_action_name(start_ccb->ccb_h.func_code))); 2502 2503 start_ccb->ccb_h.status = CAM_REQ_INPROG; 2504 (*(start_ccb->ccb_h.path->bus->xport->ops->action))(start_ccb); 2505 } 2506 2507 void 2508 xpt_action_default(union ccb *start_ccb) 2509 { 2510 struct cam_path *path; 2511 struct cam_sim *sim; 2512 int lock; 2513 2514 path = start_ccb->ccb_h.path; 2515 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2516 ("xpt_action_default: func %#x %s\n", start_ccb->ccb_h.func_code, 2517 xpt_action_name(start_ccb->ccb_h.func_code))); 2518 2519 switch (start_ccb->ccb_h.func_code) { 2520 case XPT_SCSI_IO: 2521 { 2522 struct cam_ed *device; 2523 2524 /* 2525 * For the sake of compatibility with SCSI-1 2526 * devices that may not understand the identify 2527 * message, we include lun information in the 2528 * second byte of all commands. SCSI-1 specifies 2529 * that luns are a 3 bit value and reserves only 3 2530 * bits for lun information in the CDB. Later 2531 * revisions of the SCSI spec allow for more than 8 2532 * luns, but have deprecated lun information in the 2533 * CDB. So, if the lun won't fit, we must omit. 2534 * 2535 * Also be aware that during initial probing for devices, 2536 * the inquiry information is unknown but initialized to 0. 2537 * This means that this code will be exercised while probing 2538 * devices with an ANSI revision greater than 2. 
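 * For example, a pre-SCSI-2 device at LUN 2 gets (2 << 5), i.e. 0x40,
 * OR'd into byte 1 of an inline CDB by the code below; LUNs of 8 or
 * above cannot be encoded this way and are simply omitted.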
2539 */ 2540 device = path->device; 2541 if (device->protocol_version <= SCSI_REV_2 2542 && start_ccb->ccb_h.target_lun < 8 2543 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) { 2544 2545 start_ccb->csio.cdb_io.cdb_bytes[1] |= 2546 start_ccb->ccb_h.target_lun << 5; 2547 } 2548 start_ccb->csio.scsi_status = SCSI_STATUS_OK; 2549 } 2550 /* FALLTHROUGH */ 2551 case XPT_TARGET_IO: 2552 case XPT_CONT_TARGET_IO: 2553 start_ccb->csio.sense_resid = 0; 2554 start_ccb->csio.resid = 0; 2555 /* FALLTHROUGH */ 2556 case XPT_ATA_IO: 2557 if (start_ccb->ccb_h.func_code == XPT_ATA_IO) 2558 start_ccb->ataio.resid = 0; 2559 /* FALLTHROUGH */ 2560 case XPT_NVME_IO: 2561 if (start_ccb->ccb_h.func_code == XPT_NVME_IO) 2562 start_ccb->nvmeio.resid = 0; 2563 /* FALLTHROUGH */ 2564 case XPT_RESET_DEV: 2565 case XPT_ENG_EXEC: 2566 case XPT_SMP_IO: 2567 { 2568 struct cam_devq *devq; 2569 2570 devq = path->bus->sim->devq; 2571 mtx_lock(&devq->send_mtx); 2572 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb); 2573 if (xpt_schedule_devq(devq, path->device) != 0) 2574 xpt_run_devq(devq); 2575 mtx_unlock(&devq->send_mtx); 2576 break; 2577 } 2578 case XPT_CALC_GEOMETRY: 2579 /* Filter out garbage */ 2580 if (start_ccb->ccg.block_size == 0 2581 || start_ccb->ccg.volume_size == 0) { 2582 start_ccb->ccg.cylinders = 0; 2583 start_ccb->ccg.heads = 0; 2584 start_ccb->ccg.secs_per_track = 0; 2585 start_ccb->ccb_h.status = CAM_REQ_CMP; 2586 break; 2587 } 2588 #if defined(PC98) || defined(__sparc64__) 2589 /* 2590 * In a PC-98 system, geometry translation depends on 2591 * the "real" device geometry obtained from mode page 4. 2592 * SCSI geometry translation is performed in the 2593 * initialization routine of the SCSI BIOS and the result 2594 * stored in host memory. If the translation is available 2595 * in host memory, use it. If not, rely on the default 2596 * translation the device driver performs. 2597 * For sparc64, we may need to adjust the geometry of large 2598 * disks in order to fit the limitations of the 16-bit 2599 * fields of the VTOC8 disk label. 2600 */ 2601 if (scsi_da_bios_params(&start_ccb->ccg) != 0) { 2602 start_ccb->ccb_h.status = CAM_REQ_CMP; 2603 break; 2604 } 2605 #endif 2606 goto call_sim; 2607 case XPT_ABORT: 2608 { 2609 union ccb* abort_ccb; 2610 2611 abort_ccb = start_ccb->cab.abort_ccb; 2612 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) { 2613 struct cam_ed *device; 2614 struct cam_devq *devq; 2615 2616 device = abort_ccb->ccb_h.path->device; 2617 devq = device->sim->devq; 2618 2619 mtx_lock(&devq->send_mtx); 2620 if (abort_ccb->ccb_h.pinfo.index > 0) { 2621 cam_ccbq_remove_ccb(&device->ccbq, abort_ccb); 2622 abort_ccb->ccb_h.status = 2623 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2624 xpt_freeze_devq_device(device, 1); 2625 mtx_unlock(&devq->send_mtx); 2626 xpt_done(abort_ccb); 2627 start_ccb->ccb_h.status = CAM_REQ_CMP; 2628 break; 2629 } 2630 mtx_unlock(&devq->send_mtx); 2631 2632 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX 2633 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { 2634 /* 2635 * We've caught this ccb en route to 2636 * the SIM. Flag it for abort and the 2637 * SIM will do so just before starting 2638 * real work on the CCB.
2639 */ 2640 abort_ccb->ccb_h.status = 2641 CAM_REQ_ABORTED|CAM_DEV_QFRZN; 2642 xpt_freeze_devq(abort_ccb->ccb_h.path, 1); 2643 start_ccb->ccb_h.status = CAM_REQ_CMP; 2644 break; 2645 } 2646 } 2647 if (XPT_FC_IS_QUEUED(abort_ccb) 2648 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) { 2649 /* 2650 * It's already completed but waiting 2651 * for our SWI to get to it. 2652 */ 2653 start_ccb->ccb_h.status = CAM_UA_ABORT; 2654 break; 2655 } 2656 /* 2657 * If we weren't able to take care of the abort request 2658 * in the XPT, pass the request down to the SIM for processing. 2659 */ 2660 } 2661 /* FALLTHROUGH */ 2662 case XPT_ACCEPT_TARGET_IO: 2663 case XPT_EN_LUN: 2664 case XPT_IMMED_NOTIFY: 2665 case XPT_NOTIFY_ACK: 2666 case XPT_RESET_BUS: 2667 case XPT_IMMEDIATE_NOTIFY: 2668 case XPT_NOTIFY_ACKNOWLEDGE: 2669 case XPT_GET_SIM_KNOB_OLD: 2670 case XPT_GET_SIM_KNOB: 2671 case XPT_SET_SIM_KNOB: 2672 case XPT_GET_TRAN_SETTINGS: 2673 case XPT_SET_TRAN_SETTINGS: 2674 case XPT_PATH_INQ: 2675 call_sim: 2676 sim = path->bus->sim; 2677 lock = (mtx_owned(sim->mtx) == 0); 2678 if (lock) 2679 CAM_SIM_LOCK(sim); 2680 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2681 ("sim->sim_action: func=%#x\n", start_ccb->ccb_h.func_code)); 2682 (*(sim->sim_action))(sim, start_ccb); 2683 CAM_DEBUG(path, CAM_DEBUG_TRACE, 2684 ("sim->sim_action: status=%#x\n", start_ccb->ccb_h.status)); 2685 if (lock) 2686 CAM_SIM_UNLOCK(sim); 2687 break; 2688 case XPT_PATH_STATS: 2689 start_ccb->cpis.last_reset = path->bus->last_reset; 2690 start_ccb->ccb_h.status = CAM_REQ_CMP; 2691 break; 2692 case XPT_GDEV_TYPE: 2693 { 2694 struct cam_ed *dev; 2695 2696 dev = path->device; 2697 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2698 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2699 } else { 2700 struct ccb_getdev *cgd; 2701 2702 cgd = &start_ccb->cgd; 2703 cgd->protocol = dev->protocol; 2704 cgd->inq_data = dev->inq_data; 2705 cgd->ident_data = dev->ident_data; 2706 cgd->inq_flags = dev->inq_flags; 2707 cgd->nvme_data = dev->nvme_data; 2708 cgd->nvme_cdata = dev->nvme_cdata; 2709 cgd->ccb_h.status = CAM_REQ_CMP; 2710 cgd->serial_num_len = dev->serial_num_len; 2711 if ((dev->serial_num_len > 0) 2712 && (dev->serial_num != NULL)) 2713 bcopy(dev->serial_num, cgd->serial_num, 2714 dev->serial_num_len); 2715 } 2716 break; 2717 } 2718 case XPT_GDEV_STATS: 2719 { 2720 struct cam_ed *dev; 2721 2722 dev = path->device; 2723 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) { 2724 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2725 } else { 2726 struct ccb_getdevstats *cgds; 2727 struct cam_eb *bus; 2728 struct cam_et *tar; 2729 struct cam_devq *devq; 2730 2731 cgds = &start_ccb->cgds; 2732 bus = path->bus; 2733 tar = path->target; 2734 devq = bus->sim->devq; 2735 mtx_lock(&devq->send_mtx); 2736 cgds->dev_openings = dev->ccbq.dev_openings; 2737 cgds->dev_active = dev->ccbq.dev_active; 2738 cgds->allocated = dev->ccbq.allocated; 2739 cgds->queued = cam_ccbq_pending_ccb_count(&dev->ccbq); 2740 cgds->held = cgds->allocated - cgds->dev_active - 2741 cgds->queued; 2742 cgds->last_reset = tar->last_reset; 2743 cgds->maxtags = dev->maxtags; 2744 cgds->mintags = dev->mintags; 2745 if (timevalcmp(&tar->last_reset, &bus->last_reset, <)) 2746 cgds->last_reset = bus->last_reset; 2747 mtx_unlock(&devq->send_mtx); 2748 cgds->ccb_h.status = CAM_REQ_CMP; 2749 } 2750 break; 2751 } 2752 case XPT_GDEVLIST: 2753 { 2754 struct cam_periph *nperiph; 2755 struct periph_list *periph_head; 2756 struct ccb_getdevlist *cgdl; 2757 u_int i; 2758 struct cam_ed *device; 2759 int found; 2760 
2761 2762 found = 0; 2763 2764 /* 2765 * Don't want anyone mucking with our data. 2766 */ 2767 device = path->device; 2768 periph_head = &device->periphs; 2769 cgdl = &start_ccb->cgdl; 2770 2771 /* 2772 * Check and see if the list has changed since the user 2773 * last requested a list member. If so, tell them that the 2774 * list has changed, and therefore they need to start over 2775 * from the beginning. 2776 */ 2777 if ((cgdl->index != 0) && 2778 (cgdl->generation != device->generation)) { 2779 cgdl->status = CAM_GDEVLIST_LIST_CHANGED; 2780 break; 2781 } 2782 2783 /* 2784 * Traverse the list of peripherals and attempt to find 2785 * the requested peripheral. 2786 */ 2787 for (nperiph = SLIST_FIRST(periph_head), i = 0; 2788 (nperiph != NULL) && (i <= cgdl->index); 2789 nperiph = SLIST_NEXT(nperiph, periph_links), i++) { 2790 if (i == cgdl->index) { 2791 strncpy(cgdl->periph_name, 2792 nperiph->periph_name, 2793 DEV_IDLEN); 2794 cgdl->unit_number = nperiph->unit_number; 2795 found = 1; 2796 } 2797 } 2798 if (found == 0) { 2799 cgdl->status = CAM_GDEVLIST_ERROR; 2800 break; 2801 } 2802 2803 if (nperiph == NULL) 2804 cgdl->status = CAM_GDEVLIST_LAST_DEVICE; 2805 else 2806 cgdl->status = CAM_GDEVLIST_MORE_DEVS; 2807 2808 cgdl->index++; 2809 cgdl->generation = device->generation; 2810 2811 cgdl->ccb_h.status = CAM_REQ_CMP; 2812 break; 2813 } 2814 case XPT_DEV_MATCH: 2815 { 2816 dev_pos_type position_type; 2817 struct ccb_dev_match *cdm; 2818 2819 cdm = &start_ccb->cdm; 2820 2821 /* 2822 * There are two ways of getting at information in the EDT. 2823 * The first way is via the primary EDT tree. It starts 2824 * with a list of buses, then a list of targets on a bus, 2825 * then devices/luns on a target, and then peripherals on a 2826 * device/lun. The "other" way is by the peripheral driver 2827 * lists. The peripheral driver lists are organized by 2828 * peripheral driver. (obviously) So it makes sense to 2829 * use the peripheral driver list if the user is looking 2830 * for something like "da1", or all "da" devices. If the 2831 * user is looking for something on a particular bus/target 2832 * or lun, it's generally better to go through the EDT tree. 2833 */ 2834 2835 if (cdm->pos.position_type != CAM_DEV_POS_NONE) 2836 position_type = cdm->pos.position_type; 2837 else { 2838 u_int i; 2839 2840 position_type = CAM_DEV_POS_NONE; 2841 2842 for (i = 0; i < cdm->num_patterns; i++) { 2843 if ((cdm->patterns[i].type == DEV_MATCH_BUS) 2844 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){ 2845 position_type = CAM_DEV_POS_EDT; 2846 break; 2847 } 2848 } 2849 2850 if (cdm->num_patterns == 0) 2851 position_type = CAM_DEV_POS_EDT; 2852 else if (position_type == CAM_DEV_POS_NONE) 2853 position_type = CAM_DEV_POS_PDRV; 2854 } 2855 2856 switch(position_type & CAM_DEV_POS_TYPEMASK) { 2857 case CAM_DEV_POS_EDT: 2858 xptedtmatch(cdm); 2859 break; 2860 case CAM_DEV_POS_PDRV: 2861 xptperiphlistmatch(cdm); 2862 break; 2863 default: 2864 cdm->status = CAM_DEV_MATCH_ERROR; 2865 break; 2866 } 2867 2868 if (cdm->status == CAM_DEV_MATCH_ERROR) 2869 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2870 else 2871 start_ccb->ccb_h.status = CAM_REQ_CMP; 2872 2873 break; 2874 } 2875 case XPT_SASYNC_CB: 2876 { 2877 struct ccb_setasync *csa; 2878 struct async_node *cur_entry; 2879 struct async_list *async_head; 2880 u_int32_t added; 2881 2882 csa = &start_ccb->csa; 2883 added = csa->event_enable; 2884 async_head = &path->device->asyncs; 2885 2886 /* 2887 * If there is already an entry for us, simply 2888 * update it. 
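 * Entries are keyed on the (callback, callback_arg) pair. A request
 * with event_enable == 0 removes an existing entry entirely,
 * otherwise the entry's mask is replaced; in either case
 * csa->event_enable is rewritten with the bits that were not already
 * enabled.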
2889 */ 2890 cur_entry = SLIST_FIRST(async_head); 2891 while (cur_entry != NULL) { 2892 if ((cur_entry->callback_arg == csa->callback_arg) 2893 && (cur_entry->callback == csa->callback)) 2894 break; 2895 cur_entry = SLIST_NEXT(cur_entry, links); 2896 } 2897 2898 if (cur_entry != NULL) { 2899 /* 2900 * If the request has no flags set, 2901 * remove the entry. 2902 */ 2903 added &= ~cur_entry->event_enable; 2904 if (csa->event_enable == 0) { 2905 SLIST_REMOVE(async_head, cur_entry, 2906 async_node, links); 2907 xpt_release_device(path->device); 2908 free(cur_entry, M_CAMXPT); 2909 } else { 2910 cur_entry->event_enable = csa->event_enable; 2911 } 2912 csa->event_enable = added; 2913 } else { 2914 cur_entry = malloc(sizeof(*cur_entry), M_CAMXPT, 2915 M_NOWAIT); 2916 if (cur_entry == NULL) { 2917 csa->ccb_h.status = CAM_RESRC_UNAVAIL; 2918 break; 2919 } 2920 cur_entry->event_enable = csa->event_enable; 2921 cur_entry->event_lock = 2922 mtx_owned(path->bus->sim->mtx) ? 1 : 0; 2923 cur_entry->callback_arg = csa->callback_arg; 2924 cur_entry->callback = csa->callback; 2925 SLIST_INSERT_HEAD(async_head, cur_entry, links); 2926 xpt_acquire_device(path->device); 2927 } 2928 start_ccb->ccb_h.status = CAM_REQ_CMP; 2929 break; 2930 } 2931 case XPT_REL_SIMQ: 2932 { 2933 struct ccb_relsim *crs; 2934 struct cam_ed *dev; 2935 2936 crs = &start_ccb->crs; 2937 dev = path->device; 2938 if (dev == NULL) { 2939 2940 crs->ccb_h.status = CAM_DEV_NOT_THERE; 2941 break; 2942 } 2943 2944 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) { 2945 2946 /* Don't ever go below one opening */ 2947 if (crs->openings > 0) { 2948 xpt_dev_ccbq_resize(path, crs->openings); 2949 if (bootverbose) { 2950 xpt_print(path, 2951 "number of openings is now %d\n", 2952 crs->openings); 2953 } 2954 } 2955 } 2956 2957 mtx_lock(&dev->sim->devq->send_mtx); 2958 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) { 2959 2960 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 2961 2962 /* 2963 * Just extend the old timeout and decrement 2964 * the freeze count so that a single timeout 2965 * is sufficient for releasing the queue. 2966 */ 2967 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2968 callout_stop(&dev->callout); 2969 } else { 2970 2971 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2972 } 2973 2974 callout_reset_sbt(&dev->callout, 2975 SBT_1MS * crs->release_timeout, 0, 2976 xpt_release_devq_timeout, dev, 0); 2977 2978 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING; 2979 2980 } 2981 2982 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) { 2983 2984 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) { 2985 /* 2986 * Decrement the freeze count so that a single 2987 * completion is still sufficient to unfreeze 2988 * the queue. 
2989 */ 2990 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 2991 } else { 2992 2993 dev->flags |= CAM_DEV_REL_ON_COMPLETE; 2994 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 2995 } 2996 } 2997 2998 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) { 2999 3000 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 3001 || (dev->ccbq.dev_active == 0)) { 3002 3003 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE; 3004 } else { 3005 3006 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY; 3007 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 3008 } 3009 } 3010 mtx_unlock(&dev->sim->devq->send_mtx); 3011 3012 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) 3013 xpt_release_devq(path, /*count*/1, /*run_queue*/TRUE); 3014 start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt; 3015 start_ccb->ccb_h.status = CAM_REQ_CMP; 3016 break; 3017 } 3018 case XPT_DEBUG: { 3019 struct cam_path *oldpath; 3020 3021 /* Check that all request bits are supported. */ 3022 if (start_ccb->cdbg.flags & ~(CAM_DEBUG_COMPILE)) { 3023 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 3024 break; 3025 } 3026 3027 cam_dflags = CAM_DEBUG_NONE; 3028 if (cam_dpath != NULL) { 3029 oldpath = cam_dpath; 3030 cam_dpath = NULL; 3031 xpt_free_path(oldpath); 3032 } 3033 if (start_ccb->cdbg.flags != CAM_DEBUG_NONE) { 3034 if (xpt_create_path(&cam_dpath, NULL, 3035 start_ccb->ccb_h.path_id, 3036 start_ccb->ccb_h.target_id, 3037 start_ccb->ccb_h.target_lun) != 3038 CAM_REQ_CMP) { 3039 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3040 } else { 3041 cam_dflags = start_ccb->cdbg.flags; 3042 start_ccb->ccb_h.status = CAM_REQ_CMP; 3043 xpt_print(cam_dpath, "debugging flags now %x\n", 3044 cam_dflags); 3045 } 3046 } else 3047 start_ccb->ccb_h.status = CAM_REQ_CMP; 3048 break; 3049 } 3050 case XPT_NOOP: 3051 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) 3052 xpt_freeze_devq(path, 1); 3053 start_ccb->ccb_h.status = CAM_REQ_CMP; 3054 break; 3055 case XPT_REPROBE_LUN: 3056 xpt_async(AC_INQ_CHANGED, path, NULL); 3057 start_ccb->ccb_h.status = CAM_REQ_CMP; 3058 xpt_done(start_ccb); 3059 break; 3060 default: 3061 case XPT_SDEV_TYPE: 3062 case XPT_TERM_IO: 3063 case XPT_ENG_INQ: 3064 /* XXX Implement */ 3065 xpt_print_path(start_ccb->ccb_h.path); 3066 printf("%s: CCB type %#x %s not supported\n", __func__, 3067 start_ccb->ccb_h.func_code, 3068 xpt_action_name(start_ccb->ccb_h.func_code)); 3069 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL; 3070 if (start_ccb->ccb_h.func_code & XPT_FC_DEV_QUEUED) { 3071 xpt_done(start_ccb); 3072 } 3073 break; 3074 } 3075 CAM_DEBUG(path, CAM_DEBUG_TRACE, 3076 ("xpt_action_default: func= %#x %s status %#x\n", 3077 start_ccb->ccb_h.func_code, 3078 xpt_action_name(start_ccb->ccb_h.func_code), 3079 start_ccb->ccb_h.status)); 3080 } 3081 3082 void 3083 xpt_polled_action(union ccb *start_ccb) 3084 { 3085 u_int32_t timeout; 3086 struct cam_sim *sim; 3087 struct cam_devq *devq; 3088 struct cam_ed *dev; 3089 3090 timeout = start_ccb->ccb_h.timeout * 10; 3091 sim = start_ccb->ccb_h.path->bus->sim; 3092 devq = sim->devq; 3093 dev = start_ccb->ccb_h.path->device; 3094 3095 mtx_unlock(&dev->device_mtx); 3096 3097 /* 3098 * Steal an opening so that no other queued requests 3099 * can get it before us while we simulate interrupts. 
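 * One dev opening is consumed up front; the loop below then calls
 * the SIM's poll routine and camisr_runqueue() every 100us until an
 * opening frees up or the budget of (ccb timeout * 10) iterations is
 * exhausted.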
3100 */ 3101 mtx_lock(&devq->send_mtx); 3102 dev->ccbq.dev_openings--; 3103 while((devq->send_openings <= 0 || dev->ccbq.dev_openings < 0) && 3104 (--timeout > 0)) { 3105 mtx_unlock(&devq->send_mtx); 3106 DELAY(100); 3107 CAM_SIM_LOCK(sim); 3108 (*(sim->sim_poll))(sim); 3109 CAM_SIM_UNLOCK(sim); 3110 camisr_runqueue(); 3111 mtx_lock(&devq->send_mtx); 3112 } 3113 dev->ccbq.dev_openings++; 3114 mtx_unlock(&devq->send_mtx); 3115 3116 if (timeout != 0) { 3117 xpt_action(start_ccb); 3118 while(--timeout > 0) { 3119 CAM_SIM_LOCK(sim); 3120 (*(sim->sim_poll))(sim); 3121 CAM_SIM_UNLOCK(sim); 3122 camisr_runqueue(); 3123 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK) 3124 != CAM_REQ_INPROG) 3125 break; 3126 DELAY(100); 3127 } 3128 if (timeout == 0) { 3129 /* 3130 * XXX Is it worth adding a sim_timeout entry 3131 * point so we can attempt recovery? If 3132 * this is only used for dumps, I don't think 3133 * it is. 3134 */ 3135 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 3136 } 3137 } else { 3138 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL; 3139 } 3140 3141 mtx_lock(&dev->device_mtx); 3142 } 3143 3144 /* 3145 * Schedule a peripheral driver to receive a ccb when its 3146 * target device has space for more transactions. 3147 */ 3148 void 3149 xpt_schedule(struct cam_periph *periph, u_int32_t new_priority) 3150 { 3151 3152 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n")); 3153 cam_periph_assert(periph, MA_OWNED); 3154 if (new_priority < periph->scheduled_priority) { 3155 periph->scheduled_priority = new_priority; 3156 xpt_run_allocq(periph, 0); 3157 } 3158 } 3159 3160 3161 /* 3162 * Schedule a device to run on a given queue. 3163 * If the device was inserted as a new entry on the queue, 3164 * return 1 meaning the device queue should be run. If we 3165 * were already queued, implying someone else has already 3166 * started the queue, return 0 so the caller doesn't attempt 3167 * to run the queue. 3168 */ 3169 static int 3170 xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo, 3171 u_int32_t new_priority) 3172 { 3173 int retval; 3174 u_int32_t old_priority; 3175 3176 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n")); 3177 3178 old_priority = pinfo->priority; 3179 3180 /* 3181 * Are we already queued? 
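 * For example, a device already queued at priority 5 that is
 * rescheduled at priority 2 is simply reordered in place (and 1 is
 * returned), while a request at a numerically greater or equal
 * priority leaves the entry where it is and returns 0.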
3182 */ 3183 if (pinfo->index != CAM_UNQUEUED_INDEX) { 3184 /* Simply reorder based on new priority */ 3185 if (new_priority < old_priority) { 3186 camq_change_priority(queue, pinfo->index, 3187 new_priority); 3188 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3189 ("changed priority to %d\n", 3190 new_priority)); 3191 retval = 1; 3192 } else 3193 retval = 0; 3194 } else { 3195 /* New entry on the queue */ 3196 if (new_priority < old_priority) 3197 pinfo->priority = new_priority; 3198 3199 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3200 ("Inserting onto queue\n")); 3201 pinfo->generation = ++queue->generation; 3202 camq_insert(queue, pinfo); 3203 retval = 1; 3204 } 3205 return (retval); 3206 } 3207 3208 static void 3209 xpt_run_allocq_task(void *context, int pending) 3210 { 3211 struct cam_periph *periph = context; 3212 3213 cam_periph_lock(periph); 3214 periph->flags &= ~CAM_PERIPH_RUN_TASK; 3215 xpt_run_allocq(periph, 1); 3216 cam_periph_unlock(periph); 3217 cam_periph_release(periph); 3218 } 3219 3220 static void 3221 xpt_run_allocq(struct cam_periph *periph, int sleep) 3222 { 3223 struct cam_ed *device; 3224 union ccb *ccb; 3225 uint32_t prio; 3226 3227 cam_periph_assert(periph, MA_OWNED); 3228 if (periph->periph_allocating) 3229 return; 3230 periph->periph_allocating = 1; 3231 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_allocq(%p)\n", periph)); 3232 device = periph->path->device; 3233 ccb = NULL; 3234 restart: 3235 while ((prio = min(periph->scheduled_priority, 3236 periph->immediate_priority)) != CAM_PRIORITY_NONE && 3237 (periph->periph_allocated - (ccb != NULL ? 1 : 0) < 3238 device->ccbq.total_openings || prio <= CAM_PRIORITY_OOB)) { 3239 3240 if (ccb == NULL && 3241 (ccb = xpt_get_ccb_nowait(periph)) == NULL) { 3242 if (sleep) { 3243 ccb = xpt_get_ccb(periph); 3244 goto restart; 3245 } 3246 if (periph->flags & CAM_PERIPH_RUN_TASK) 3247 break; 3248 cam_periph_doacquire(periph); 3249 periph->flags |= CAM_PERIPH_RUN_TASK; 3250 taskqueue_enqueue(xsoftc.xpt_taskq, 3251 &periph->periph_run_task); 3252 break; 3253 } 3254 xpt_setup_ccb(&ccb->ccb_h, periph->path, prio); 3255 if (prio == periph->immediate_priority) { 3256 periph->immediate_priority = CAM_PRIORITY_NONE; 3257 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3258 ("waking cam_periph_getccb()\n")); 3259 SLIST_INSERT_HEAD(&periph->ccb_list, &ccb->ccb_h, 3260 periph_links.sle); 3261 wakeup(&periph->ccb_list); 3262 } else { 3263 periph->scheduled_priority = CAM_PRIORITY_NONE; 3264 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3265 ("calling periph_start()\n")); 3266 periph->periph_start(periph, ccb); 3267 } 3268 ccb = NULL; 3269 } 3270 if (ccb != NULL) 3271 xpt_release_ccb(ccb); 3272 periph->periph_allocating = 0; 3273 } 3274 3275 static void 3276 xpt_run_devq(struct cam_devq *devq) 3277 { 3278 int lock; 3279 3280 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_devq\n")); 3281 3282 devq->send_queue.qfrozen_cnt++; 3283 while ((devq->send_queue.entries > 0) 3284 && (devq->send_openings > 0) 3285 && (devq->send_queue.qfrozen_cnt <= 1)) { 3286 struct cam_ed *device; 3287 union ccb *work_ccb; 3288 struct cam_sim *sim; 3289 struct xpt_proto *proto; 3290 3291 device = (struct cam_ed *)camq_remove(&devq->send_queue, 3292 CAMQ_HEAD); 3293 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, 3294 ("running device %p\n", device)); 3295 3296 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD); 3297 if (work_ccb == NULL) { 3298 printf("device on run queue with no ccbs???\n"); 3299 continue; 3300 } 3301 3302 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) { 3303 3304 mtx_lock(&xsoftc.xpt_highpower_lock); 3305 if 
(xsoftc.num_highpower <= 0) { 3306 /* 3307 * We got a high power command, but we 3308 * don't have any available slots. Freeze 3309 * the device queue until we have a slot 3310 * available. 3311 */ 3312 xpt_freeze_devq_device(device, 1); 3313 STAILQ_INSERT_TAIL(&xsoftc.highpowerq, device, 3314 highpowerq_entry); 3315 3316 mtx_unlock(&xsoftc.xpt_highpower_lock); 3317 continue; 3318 } else { 3319 /* 3320 * Consume a high power slot while 3321 * this ccb runs. 3322 */ 3323 xsoftc.num_highpower--; 3324 } 3325 mtx_unlock(&xsoftc.xpt_highpower_lock); 3326 } 3327 cam_ccbq_remove_ccb(&device->ccbq, work_ccb); 3328 cam_ccbq_send_ccb(&device->ccbq, work_ccb); 3329 devq->send_openings--; 3330 devq->send_active++; 3331 xpt_schedule_devq(devq, device); 3332 mtx_unlock(&devq->send_mtx); 3333 3334 if ((work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0) { 3335 /* 3336 * The client wants to freeze the queue 3337 * after this CCB is sent. 3338 */ 3339 xpt_freeze_devq(work_ccb->ccb_h.path, 1); 3340 } 3341 3342 /* In Target mode, the peripheral driver knows best... */ 3343 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) { 3344 if ((device->inq_flags & SID_CmdQue) != 0 3345 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE) 3346 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID; 3347 else 3348 /* 3349 * Clear this in case of a retried CCB that 3350 * failed due to a rejected tag. 3351 */ 3352 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID; 3353 } 3354 3355 KASSERT(device == work_ccb->ccb_h.path->device, 3356 ("device (%p) / path->device (%p) mismatch", 3357 device, work_ccb->ccb_h.path->device)); 3358 proto = xpt_proto_find(device->protocol); 3359 if (proto && proto->ops->debug_out) 3360 proto->ops->debug_out(work_ccb); 3361 3362 /* 3363 * Device queues can be shared among multiple SIM instances 3364 * that reside on different buses. Use the SIM from the 3365 * queued device, rather than the one from the calling bus. 3366 */ 3367 sim = device->sim; 3368 lock = (mtx_owned(sim->mtx) == 0); 3369 if (lock) 3370 CAM_SIM_LOCK(sim); 3371 work_ccb->ccb_h.qos.sim_data = sbinuptime(); // XXX uintptr_t too small on 32-bit platforms 3372 (*(sim->sim_action))(sim, work_ccb); 3373 if (lock) 3374 CAM_SIM_UNLOCK(sim); 3375 mtx_lock(&devq->send_mtx); 3376 } 3377 devq->send_queue.qfrozen_cnt--; 3378 } 3379 3380 /* 3381 * This function merges fields from the slave ccb into the master ccb, while 3382 * keeping important fields in the master ccb constant. 3383 */ 3384 void 3385 xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb) 3386 { 3387 3388 /* 3389 * Pull fields that are valid for peripheral drivers to set 3390 * into the master CCB along with the CCB "payload".
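 * Only retry_count, func_code, timeout and flags are taken from the
 * slave's header; everything past the ccb_hdr is then copied
 * wholesale, so the master's path, priority and queueing state are
 * preserved.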
3391 */ 3392 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count; 3393 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code; 3394 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout; 3395 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags; 3396 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1], 3397 sizeof(union ccb) - sizeof(struct ccb_hdr)); 3398 } 3399 3400 void 3401 xpt_setup_ccb_flags(struct ccb_hdr *ccb_h, struct cam_path *path, 3402 u_int32_t priority, u_int32_t flags) 3403 { 3404 3405 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n")); 3406 ccb_h->pinfo.priority = priority; 3407 ccb_h->path = path; 3408 ccb_h->path_id = path->bus->path_id; 3409 if (path->target) 3410 ccb_h->target_id = path->target->target_id; 3411 else 3412 ccb_h->target_id = CAM_TARGET_WILDCARD; 3413 if (path->device) { 3414 ccb_h->target_lun = path->device->lun_id; 3415 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation; 3416 } else { 3417 ccb_h->target_lun = CAM_TARGET_WILDCARD; 3418 } 3419 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 3420 ccb_h->flags = flags; 3421 ccb_h->xflags = 0; 3422 } 3423 3424 void 3425 xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority) 3426 { 3427 xpt_setup_ccb_flags(ccb_h, path, priority, /*flags*/ 0); 3428 } 3429 3430 /* Path manipulation functions */ 3431 cam_status 3432 xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph, 3433 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3434 { 3435 struct cam_path *path; 3436 cam_status status; 3437 3438 path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3439 3440 if (path == NULL) { 3441 status = CAM_RESRC_UNAVAIL; 3442 return(status); 3443 } 3444 status = xpt_compile_path(path, perph, path_id, target_id, lun_id); 3445 if (status != CAM_REQ_CMP) { 3446 free(path, M_CAMPATH); 3447 path = NULL; 3448 } 3449 *new_path_ptr = path; 3450 return (status); 3451 } 3452 3453 cam_status 3454 xpt_create_path_unlocked(struct cam_path **new_path_ptr, 3455 struct cam_periph *periph, path_id_t path_id, 3456 target_id_t target_id, lun_id_t lun_id) 3457 { 3458 3459 return (xpt_create_path(new_path_ptr, periph, path_id, target_id, 3460 lun_id)); 3461 } 3462 3463 cam_status 3464 xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph, 3465 path_id_t path_id, target_id_t target_id, lun_id_t lun_id) 3466 { 3467 struct cam_eb *bus; 3468 struct cam_et *target; 3469 struct cam_ed *device; 3470 cam_status status; 3471 3472 status = CAM_REQ_CMP; /* Completed without error */ 3473 target = NULL; /* Wildcarded */ 3474 device = NULL; /* Wildcarded */ 3475 3476 /* 3477 * We will potentially modify the EDT, so block interrupts 3478 * that may attempt to create cam paths. 
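 * (In the current locking scheme this amounts to holding the
 * topology lock via xpt_lock_buses() and the per-bus eb_mtx, as done
 * below, while the target and device entries are looked up or
 * created.)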
3479 */ 3480 bus = xpt_find_bus(path_id); 3481 if (bus == NULL) { 3482 status = CAM_PATH_INVALID; 3483 } else { 3484 xpt_lock_buses(); 3485 mtx_lock(&bus->eb_mtx); 3486 target = xpt_find_target(bus, target_id); 3487 if (target == NULL) { 3488 /* Create one */ 3489 struct cam_et *new_target; 3490 3491 new_target = xpt_alloc_target(bus, target_id); 3492 if (new_target == NULL) { 3493 status = CAM_RESRC_UNAVAIL; 3494 } else { 3495 target = new_target; 3496 } 3497 } 3498 xpt_unlock_buses(); 3499 if (target != NULL) { 3500 device = xpt_find_device(target, lun_id); 3501 if (device == NULL) { 3502 /* Create one */ 3503 struct cam_ed *new_device; 3504 3505 new_device = 3506 (*(bus->xport->ops->alloc_device))(bus, 3507 target, 3508 lun_id); 3509 if (new_device == NULL) { 3510 status = CAM_RESRC_UNAVAIL; 3511 } else { 3512 device = new_device; 3513 } 3514 } 3515 } 3516 mtx_unlock(&bus->eb_mtx); 3517 } 3518 3519 /* 3520 * Only touch the user's data if we are successful. 3521 */ 3522 if (status == CAM_REQ_CMP) { 3523 new_path->periph = perph; 3524 new_path->bus = bus; 3525 new_path->target = target; 3526 new_path->device = device; 3527 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n")); 3528 } else { 3529 if (device != NULL) 3530 xpt_release_device(device); 3531 if (target != NULL) 3532 xpt_release_target(target); 3533 if (bus != NULL) 3534 xpt_release_bus(bus); 3535 } 3536 return (status); 3537 } 3538 3539 cam_status 3540 xpt_clone_path(struct cam_path **new_path_ptr, struct cam_path *path) 3541 { 3542 struct cam_path *new_path; 3543 3544 new_path = (struct cam_path *)malloc(sizeof(*path), M_CAMPATH, M_NOWAIT); 3545 if (new_path == NULL) 3546 return(CAM_RESRC_UNAVAIL); 3547 xpt_copy_path(new_path, path); 3548 *new_path_ptr = new_path; 3549 return (CAM_REQ_CMP); 3550 } 3551 3552 void 3553 xpt_copy_path(struct cam_path *new_path, struct cam_path *path) 3554 { 3555 3556 *new_path = *path; 3557 if (path->bus != NULL) 3558 xpt_acquire_bus(path->bus); 3559 if (path->target != NULL) 3560 xpt_acquire_target(path->target); 3561 if (path->device != NULL) 3562 xpt_acquire_device(path->device); 3563 } 3564 3565 void 3566 xpt_release_path(struct cam_path *path) 3567 { 3568 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n")); 3569 if (path->device != NULL) { 3570 xpt_release_device(path->device); 3571 path->device = NULL; 3572 } 3573 if (path->target != NULL) { 3574 xpt_release_target(path->target); 3575 path->target = NULL; 3576 } 3577 if (path->bus != NULL) { 3578 xpt_release_bus(path->bus); 3579 path->bus = NULL; 3580 } 3581 } 3582 3583 void 3584 xpt_free_path(struct cam_path *path) 3585 { 3586 3587 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n")); 3588 xpt_release_path(path); 3589 free(path, M_CAMPATH); 3590 } 3591 3592 void 3593 xpt_path_counts(struct cam_path *path, uint32_t *bus_ref, 3594 uint32_t *periph_ref, uint32_t *target_ref, uint32_t *device_ref) 3595 { 3596 3597 xpt_lock_buses(); 3598 if (bus_ref) { 3599 if (path->bus) 3600 *bus_ref = path->bus->refcount; 3601 else 3602 *bus_ref = 0; 3603 } 3604 if (periph_ref) { 3605 if (path->periph) 3606 *periph_ref = path->periph->refcount; 3607 else 3608 *periph_ref = 0; 3609 } 3610 xpt_unlock_buses(); 3611 if (target_ref) { 3612 if (path->target) 3613 *target_ref = path->target->refcount; 3614 else 3615 *target_ref = 0; 3616 } 3617 if (device_ref) { 3618 if (path->device) 3619 *device_ref = path->device->refcount; 3620 else 3621 *device_ref = 0; 3622 } 3623 } 3624 3625 /* 3626 * Return -1 for failure, 0 for exact match, 1 for match with 
wildcards 3627 * in path1, 2 for match with wildcards in path2. 3628 */ 3629 int 3630 xpt_path_comp(struct cam_path *path1, struct cam_path *path2) 3631 { 3632 int retval = 0; 3633 3634 if (path1->bus != path2->bus) { 3635 if (path1->bus->path_id == CAM_BUS_WILDCARD) 3636 retval = 1; 3637 else if (path2->bus->path_id == CAM_BUS_WILDCARD) 3638 retval = 2; 3639 else 3640 return (-1); 3641 } 3642 if (path1->target != path2->target) { 3643 if (path1->target->target_id == CAM_TARGET_WILDCARD) { 3644 if (retval == 0) 3645 retval = 1; 3646 } else if (path2->target->target_id == CAM_TARGET_WILDCARD) 3647 retval = 2; 3648 else 3649 return (-1); 3650 } 3651 if (path1->device != path2->device) { 3652 if (path1->device->lun_id == CAM_LUN_WILDCARD) { 3653 if (retval == 0) 3654 retval = 1; 3655 } else if (path2->device->lun_id == CAM_LUN_WILDCARD) 3656 retval = 2; 3657 else 3658 return (-1); 3659 } 3660 return (retval); 3661 } 3662 3663 int 3664 xpt_path_comp_dev(struct cam_path *path, struct cam_ed *dev) 3665 { 3666 int retval = 0; 3667 3668 if (path->bus != dev->target->bus) { 3669 if (path->bus->path_id == CAM_BUS_WILDCARD) 3670 retval = 1; 3671 else if (dev->target->bus->path_id == CAM_BUS_WILDCARD) 3672 retval = 2; 3673 else 3674 return (-1); 3675 } 3676 if (path->target != dev->target) { 3677 if (path->target->target_id == CAM_TARGET_WILDCARD) { 3678 if (retval == 0) 3679 retval = 1; 3680 } else if (dev->target->target_id == CAM_TARGET_WILDCARD) 3681 retval = 2; 3682 else 3683 return (-1); 3684 } 3685 if (path->device != dev) { 3686 if (path->device->lun_id == CAM_LUN_WILDCARD) { 3687 if (retval == 0) 3688 retval = 1; 3689 } else if (dev->lun_id == CAM_LUN_WILDCARD) 3690 retval = 2; 3691 else 3692 return (-1); 3693 } 3694 return (retval); 3695 } 3696 3697 void 3698 xpt_print_path(struct cam_path *path) 3699 { 3700 3701 if (path == NULL) 3702 printf("(nopath): "); 3703 else { 3704 if (path->periph != NULL) 3705 printf("(%s%d:", path->periph->periph_name, 3706 path->periph->unit_number); 3707 else 3708 printf("(noperiph:"); 3709 3710 if (path->bus != NULL) 3711 printf("%s%d:%d:", path->bus->sim->sim_name, 3712 path->bus->sim->unit_number, 3713 path->bus->sim->bus_id); 3714 else 3715 printf("nobus:"); 3716 3717 if (path->target != NULL) 3718 printf("%d:", path->target->target_id); 3719 else 3720 printf("X:"); 3721 3722 if (path->device != NULL) 3723 printf("%jx): ", (uintmax_t)path->device->lun_id); 3724 else 3725 printf("X): "); 3726 } 3727 } 3728 3729 void 3730 xpt_print_device(struct cam_ed *device) 3731 { 3732 3733 if (device == NULL) 3734 printf("(nopath): "); 3735 else { 3736 printf("(noperiph:%s%d:%d:%d:%jx): ", device->sim->sim_name, 3737 device->sim->unit_number, 3738 device->sim->bus_id, 3739 device->target->target_id, 3740 (uintmax_t)device->lun_id); 3741 } 3742 } 3743 3744 void 3745 xpt_print(struct cam_path *path, const char *fmt, ...) 
3746 { 3747 va_list ap; 3748 xpt_print_path(path); 3749 va_start(ap, fmt); 3750 vprintf(fmt, ap); 3751 va_end(ap); 3752 } 3753 3754 int 3755 xpt_path_string(struct cam_path *path, char *str, size_t str_len) 3756 { 3757 struct sbuf sb; 3758 3759 sbuf_new(&sb, str, str_len, 0); 3760 3761 if (path == NULL) 3762 sbuf_printf(&sb, "(nopath): "); 3763 else { 3764 if (path->periph != NULL) 3765 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name, 3766 path->periph->unit_number); 3767 else 3768 sbuf_printf(&sb, "(noperiph:"); 3769 3770 if (path->bus != NULL) 3771 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name, 3772 path->bus->sim->unit_number, 3773 path->bus->sim->bus_id); 3774 else 3775 sbuf_printf(&sb, "nobus:"); 3776 3777 if (path->target != NULL) 3778 sbuf_printf(&sb, "%d:", path->target->target_id); 3779 else 3780 sbuf_printf(&sb, "X:"); 3781 3782 if (path->device != NULL) 3783 sbuf_printf(&sb, "%jx): ", 3784 (uintmax_t)path->device->lun_id); 3785 else 3786 sbuf_printf(&sb, "X): "); 3787 } 3788 sbuf_finish(&sb); 3789 3790 return(sbuf_len(&sb)); 3791 } 3792 3793 path_id_t 3794 xpt_path_path_id(struct cam_path *path) 3795 { 3796 return(path->bus->path_id); 3797 } 3798 3799 target_id_t 3800 xpt_path_target_id(struct cam_path *path) 3801 { 3802 if (path->target != NULL) 3803 return (path->target->target_id); 3804 else 3805 return (CAM_TARGET_WILDCARD); 3806 } 3807 3808 lun_id_t 3809 xpt_path_lun_id(struct cam_path *path) 3810 { 3811 if (path->device != NULL) 3812 return (path->device->lun_id); 3813 else 3814 return (CAM_LUN_WILDCARD); 3815 } 3816 3817 struct cam_sim * 3818 xpt_path_sim(struct cam_path *path) 3819 { 3820 3821 return (path->bus->sim); 3822 } 3823 3824 struct cam_periph* 3825 xpt_path_periph(struct cam_path *path) 3826 { 3827 3828 return (path->periph); 3829 } 3830 3831 /* 3832 * Release a CAM control block for the caller. Remit the cost of the structure 3833 * to the device referenced by the path. If the this device had no 'credits' 3834 * and peripheral drivers have registered async callbacks for this notification 3835 * call them now. 3836 */ 3837 void 3838 xpt_release_ccb(union ccb *free_ccb) 3839 { 3840 struct cam_ed *device; 3841 struct cam_periph *periph; 3842 3843 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n")); 3844 xpt_path_assert(free_ccb->ccb_h.path, MA_OWNED); 3845 device = free_ccb->ccb_h.path->device; 3846 periph = free_ccb->ccb_h.path->periph; 3847 3848 xpt_free_ccb(free_ccb); 3849 periph->periph_allocated--; 3850 cam_ccbq_release_opening(&device->ccbq); 3851 xpt_run_allocq(periph, 0); 3852 } 3853 3854 /* Functions accessed by SIM drivers */ 3855 3856 static struct xpt_xport_ops xport_default_ops = { 3857 .alloc_device = xpt_alloc_device_default, 3858 .action = xpt_action_default, 3859 .async = xpt_dev_async_default, 3860 }; 3861 static struct xpt_xport xport_default = { 3862 .xport = XPORT_UNKNOWN, 3863 .name = "unknown", 3864 .ops = &xport_default_ops, 3865 }; 3866 3867 CAM_XPT_XPORT(xport_default); 3868 3869 /* 3870 * A sim structure, listing the SIM entry points and instance 3871 * identification info is passed to xpt_bus_register to hook the SIM 3872 * into the CAM framework. xpt_bus_register creates a cam_eb entry 3873 * for this new bus and places it in the array of buses and assigns 3874 * it a path_id. The path_id may be influenced by "hard wiring" 3875 * information specified by the user. Once interrupt services are 3876 * available, the bus will be probed. 
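 * A SIM driver therefore typically allocates its struct cam_sim
 * (e.g. with cam_sim_alloc()), acquires the SIM lock, and then calls
 * xpt_bus_register(), treating any return other than CAM_SUCCESS as
 * a failed attach.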
3877 */ 3878 int32_t 3879 xpt_bus_register(struct cam_sim *sim, device_t parent, u_int32_t bus) 3880 { 3881 struct cam_eb *new_bus; 3882 struct cam_eb *old_bus; 3883 struct ccb_pathinq cpi; 3884 struct cam_path *path; 3885 cam_status status; 3886 3887 mtx_assert(sim->mtx, MA_OWNED); 3888 3889 sim->bus_id = bus; 3890 new_bus = (struct cam_eb *)malloc(sizeof(*new_bus), 3891 M_CAMXPT, M_NOWAIT|M_ZERO); 3892 if (new_bus == NULL) { 3893 /* Couldn't satisfy request */ 3894 return (CAM_RESRC_UNAVAIL); 3895 } 3896 3897 mtx_init(&new_bus->eb_mtx, "CAM bus lock", NULL, MTX_DEF); 3898 TAILQ_INIT(&new_bus->et_entries); 3899 cam_sim_hold(sim); 3900 new_bus->sim = sim; 3901 timevalclear(&new_bus->last_reset); 3902 new_bus->flags = 0; 3903 new_bus->refcount = 1; /* Held until a bus_deregister event */ 3904 new_bus->generation = 0; 3905 3906 xpt_lock_buses(); 3907 sim->path_id = new_bus->path_id = 3908 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id); 3909 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses); 3910 while (old_bus != NULL 3911 && old_bus->path_id < new_bus->path_id) 3912 old_bus = TAILQ_NEXT(old_bus, links); 3913 if (old_bus != NULL) 3914 TAILQ_INSERT_BEFORE(old_bus, new_bus, links); 3915 else 3916 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links); 3917 xsoftc.bus_generation++; 3918 xpt_unlock_buses(); 3919 3920 /* 3921 * Set a default transport so that a PATH_INQ can be issued to 3922 * the SIM. This will then allow for probing and attaching of 3923 * a more appropriate transport. 3924 */ 3925 new_bus->xport = &xport_default; 3926 3927 status = xpt_create_path(&path, /*periph*/NULL, sim->path_id, 3928 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3929 if (status != CAM_REQ_CMP) { 3930 xpt_release_bus(new_bus); 3931 free(path, M_CAMXPT); 3932 return (CAM_RESRC_UNAVAIL); 3933 } 3934 3935 xpt_setup_ccb(&cpi.ccb_h, path, CAM_PRIORITY_NORMAL); 3936 cpi.ccb_h.func_code = XPT_PATH_INQ; 3937 xpt_action((union ccb *)&cpi); 3938 3939 if (cpi.ccb_h.status == CAM_REQ_CMP) { 3940 struct xpt_xport **xpt; 3941 3942 SET_FOREACH(xpt, cam_xpt_xport_set) { 3943 if ((*xpt)->xport == cpi.transport) { 3944 new_bus->xport = *xpt; 3945 break; 3946 } 3947 } 3948 if (new_bus->xport == NULL) { 3949 xpt_print_path(path); 3950 printf("No transport found for %d\n", cpi.transport); 3951 xpt_release_bus(new_bus); 3952 free(path, M_CAMXPT); 3953 return (CAM_RESRC_UNAVAIL); 3954 } 3955 } 3956 3957 /* Notify interested parties */ 3958 if (sim->path_id != CAM_XPT_PATH_ID) { 3959 3960 xpt_async(AC_PATH_REGISTERED, path, &cpi); 3961 if ((cpi.hba_misc & PIM_NOSCAN) == 0) { 3962 union ccb *scan_ccb; 3963 3964 /* Initiate bus rescan. */ 3965 scan_ccb = xpt_alloc_ccb_nowait(); 3966 if (scan_ccb != NULL) { 3967 scan_ccb->ccb_h.path = path; 3968 scan_ccb->ccb_h.func_code = XPT_SCAN_BUS; 3969 scan_ccb->crcn.flags = 0; 3970 xpt_rescan(scan_ccb); 3971 } else { 3972 xpt_print(path, 3973 "Can't allocate CCB to scan bus\n"); 3974 xpt_free_path(path); 3975 } 3976 } else 3977 xpt_free_path(path); 3978 } else 3979 xpt_free_path(path); 3980 return (CAM_SUCCESS); 3981 } 3982 3983 int32_t 3984 xpt_bus_deregister(path_id_t pathid) 3985 { 3986 struct cam_path bus_path; 3987 cam_status status; 3988 3989 status = xpt_compile_path(&bus_path, NULL, pathid, 3990 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 3991 if (status != CAM_REQ_CMP) 3992 return (status); 3993 3994 xpt_async(AC_LOST_DEVICE, &bus_path, NULL); 3995 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL); 3996 3997 /* Release the reference count held while registered. 
*/ 3998 xpt_release_bus(bus_path.bus); 3999 xpt_release_path(&bus_path); 4000 4001 return (CAM_REQ_CMP); 4002 } 4003 4004 static path_id_t 4005 xptnextfreepathid(void) 4006 { 4007 struct cam_eb *bus; 4008 path_id_t pathid; 4009 const char *strval; 4010 4011 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4012 pathid = 0; 4013 bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4014 retry: 4015 /* Find an unoccupied pathid */ 4016 while (bus != NULL && bus->path_id <= pathid) { 4017 if (bus->path_id == pathid) 4018 pathid++; 4019 bus = TAILQ_NEXT(bus, links); 4020 } 4021 4022 /* 4023 * Ensure that this pathid is not reserved for 4024 * a bus that may be registered in the future. 4025 */ 4026 if (resource_string_value("scbus", pathid, "at", &strval) == 0) { 4027 ++pathid; 4028 /* Start the search over */ 4029 goto retry; 4030 } 4031 return (pathid); 4032 } 4033 4034 static path_id_t 4035 xptpathid(const char *sim_name, int sim_unit, int sim_bus) 4036 { 4037 path_id_t pathid; 4038 int i, dunit, val; 4039 char buf[32]; 4040 const char *dname; 4041 4042 pathid = CAM_XPT_PATH_ID; 4043 snprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit); 4044 if (strcmp(buf, "xpt0") == 0 && sim_bus == 0) 4045 return (pathid); 4046 i = 0; 4047 while ((resource_find_match(&i, &dname, &dunit, "at", buf)) == 0) { 4048 if (strcmp(dname, "scbus")) { 4049 /* Avoid a bit of foot shooting. */ 4050 continue; 4051 } 4052 if (dunit < 0) /* unwired?! */ 4053 continue; 4054 if (resource_int_value("scbus", dunit, "bus", &val) == 0) { 4055 if (sim_bus == val) { 4056 pathid = dunit; 4057 break; 4058 } 4059 } else if (sim_bus == 0) { 4060 /* Unspecified matches bus 0 */ 4061 pathid = dunit; 4062 break; 4063 } else { 4064 printf("Ambiguous scbus configuration for %s%d " 4065 "bus %d, cannot wire down. The kernel " 4066 "config entry for scbus%d should " 4067 "specify a controller bus.\n" 4068 "Scbus will be assigned dynamically.\n", 4069 sim_name, sim_unit, sim_bus, dunit); 4070 break; 4071 } 4072 } 4073 4074 if (pathid == CAM_XPT_PATH_ID) 4075 pathid = xptnextfreepathid(); 4076 return (pathid); 4077 } 4078 4079 static const char * 4080 xpt_async_string(u_int32_t async_code) 4081 { 4082 4083 switch (async_code) { 4084 case AC_BUS_RESET: return ("AC_BUS_RESET"); 4085 case AC_UNSOL_RESEL: return ("AC_UNSOL_RESEL"); 4086 case AC_SCSI_AEN: return ("AC_SCSI_AEN"); 4087 case AC_SENT_BDR: return ("AC_SENT_BDR"); 4088 case AC_PATH_REGISTERED: return ("AC_PATH_REGISTERED"); 4089 case AC_PATH_DEREGISTERED: return ("AC_PATH_DEREGISTERED"); 4090 case AC_FOUND_DEVICE: return ("AC_FOUND_DEVICE"); 4091 case AC_LOST_DEVICE: return ("AC_LOST_DEVICE"); 4092 case AC_TRANSFER_NEG: return ("AC_TRANSFER_NEG"); 4093 case AC_INQ_CHANGED: return ("AC_INQ_CHANGED"); 4094 case AC_GETDEV_CHANGED: return ("AC_GETDEV_CHANGED"); 4095 case AC_CONTRACT: return ("AC_CONTRACT"); 4096 case AC_ADVINFO_CHANGED: return ("AC_ADVINFO_CHANGED"); 4097 case AC_UNIT_ATTENTION: return ("AC_UNIT_ATTENTION"); 4098 } 4099 return ("AC_UNKNOWN"); 4100 } 4101 4102 static int 4103 xpt_async_size(u_int32_t async_code) 4104 { 4105 4106 switch (async_code) { 4107 case AC_BUS_RESET: return (0); 4108 case AC_UNSOL_RESEL: return (0); 4109 case AC_SCSI_AEN: return (0); 4110 case AC_SENT_BDR: return (0); 4111 case AC_PATH_REGISTERED: return (sizeof(struct ccb_pathinq)); 4112 case AC_PATH_DEREGISTERED: return (0); 4113 case AC_FOUND_DEVICE: return (sizeof(struct ccb_getdev)); 4114 case AC_LOST_DEVICE: return (0); 4115 case AC_TRANSFER_NEG: return (sizeof(struct ccb_trans_settings)); 4116 case 
AC_INQ_CHANGED: return (0); 4117 case AC_GETDEV_CHANGED: return (0); 4118 case AC_CONTRACT: return (sizeof(struct ac_contract)); 4119 case AC_ADVINFO_CHANGED: return (-1); 4120 case AC_UNIT_ATTENTION: return (sizeof(struct ccb_scsiio)); 4121 } 4122 return (0); 4123 } 4124 4125 static int 4126 xpt_async_process_dev(struct cam_ed *device, void *arg) 4127 { 4128 union ccb *ccb = arg; 4129 struct cam_path *path = ccb->ccb_h.path; 4130 void *async_arg = ccb->casync.async_arg_ptr; 4131 u_int32_t async_code = ccb->casync.async_code; 4132 int relock; 4133 4134 if (path->device != device 4135 && path->device->lun_id != CAM_LUN_WILDCARD 4136 && device->lun_id != CAM_LUN_WILDCARD) 4137 return (1); 4138 4139 /* 4140 * The async callback could free the device. 4141 * If it is a broadcast async, it doesn't hold 4142 * device reference, so take our own reference. 4143 */ 4144 xpt_acquire_device(device); 4145 4146 /* 4147 * If async for specific device is to be delivered to 4148 * the wildcard client, take the specific device lock. 4149 * XXX: We may need a way for client to specify it. 4150 */ 4151 if ((device->lun_id == CAM_LUN_WILDCARD && 4152 path->device->lun_id != CAM_LUN_WILDCARD) || 4153 (device->target->target_id == CAM_TARGET_WILDCARD && 4154 path->target->target_id != CAM_TARGET_WILDCARD) || 4155 (device->target->bus->path_id == CAM_BUS_WILDCARD && 4156 path->target->bus->path_id != CAM_BUS_WILDCARD)) { 4157 mtx_unlock(&device->device_mtx); 4158 xpt_path_lock(path); 4159 relock = 1; 4160 } else 4161 relock = 0; 4162 4163 (*(device->target->bus->xport->ops->async))(async_code, 4164 device->target->bus, device->target, device, async_arg); 4165 xpt_async_bcast(&device->asyncs, async_code, path, async_arg); 4166 4167 if (relock) { 4168 xpt_path_unlock(path); 4169 mtx_lock(&device->device_mtx); 4170 } 4171 xpt_release_device(device); 4172 return (1); 4173 } 4174 4175 static int 4176 xpt_async_process_tgt(struct cam_et *target, void *arg) 4177 { 4178 union ccb *ccb = arg; 4179 struct cam_path *path = ccb->ccb_h.path; 4180 4181 if (path->target != target 4182 && path->target->target_id != CAM_TARGET_WILDCARD 4183 && target->target_id != CAM_TARGET_WILDCARD) 4184 return (1); 4185 4186 if (ccb->casync.async_code == AC_SENT_BDR) { 4187 /* Update our notion of when the last reset occurred */ 4188 microtime(&target->last_reset); 4189 } 4190 4191 return (xptdevicetraverse(target, NULL, xpt_async_process_dev, ccb)); 4192 } 4193 4194 static void 4195 xpt_async_process(struct cam_periph *periph, union ccb *ccb) 4196 { 4197 struct cam_eb *bus; 4198 struct cam_path *path; 4199 void *async_arg; 4200 u_int32_t async_code; 4201 4202 path = ccb->ccb_h.path; 4203 async_code = ccb->casync.async_code; 4204 async_arg = ccb->casync.async_arg_ptr; 4205 CAM_DEBUG(path, CAM_DEBUG_TRACE | CAM_DEBUG_INFO, 4206 ("xpt_async(%s)\n", xpt_async_string(async_code))); 4207 bus = path->bus; 4208 4209 if (async_code == AC_BUS_RESET) { 4210 /* Update our notion of when the last reset occurred */ 4211 microtime(&bus->last_reset); 4212 } 4213 4214 xpttargettraverse(bus, NULL, xpt_async_process_tgt, ccb); 4215 4216 /* 4217 * If this wasn't a fully wildcarded async, tell all 4218 * clients that want all async events. 
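 * ("All async events" clients are those that passed a NULL path to
 * xpt_register_async(); their callbacks hang off the xpt bus's
 * wildcard device and are invoked below via xpt_async_process_dev()
 * with the usual ac_callback_t shape:
 *
 *	void callback(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg);
 *
 * See the worked example at the end of this file.)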
4219 */ 4220 if (bus != xpt_periph->path->bus) { 4221 xpt_path_lock(xpt_periph->path); 4222 xpt_async_process_dev(xpt_periph->path->device, ccb); 4223 xpt_path_unlock(xpt_periph->path); 4224 } 4225 4226 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4227 xpt_release_devq(path, 1, TRUE); 4228 else 4229 xpt_release_simq(path->bus->sim, TRUE); 4230 if (ccb->casync.async_arg_size > 0) 4231 free(async_arg, M_CAMXPT); 4232 xpt_free_path(path); 4233 xpt_free_ccb(ccb); 4234 } 4235 4236 static void 4237 xpt_async_bcast(struct async_list *async_head, 4238 u_int32_t async_code, 4239 struct cam_path *path, void *async_arg) 4240 { 4241 struct async_node *cur_entry; 4242 int lock; 4243 4244 cur_entry = SLIST_FIRST(async_head); 4245 while (cur_entry != NULL) { 4246 struct async_node *next_entry; 4247 /* 4248 * Grab the next list entry before we call the current 4249 * entry's callback. This is because the callback function 4250 * can delete its async callback entry. 4251 */ 4252 next_entry = SLIST_NEXT(cur_entry, links); 4253 if ((cur_entry->event_enable & async_code) != 0) { 4254 lock = cur_entry->event_lock; 4255 if (lock) 4256 CAM_SIM_LOCK(path->device->sim); 4257 cur_entry->callback(cur_entry->callback_arg, 4258 async_code, path, 4259 async_arg); 4260 if (lock) 4261 CAM_SIM_UNLOCK(path->device->sim); 4262 } 4263 cur_entry = next_entry; 4264 } 4265 } 4266 4267 void 4268 xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg) 4269 { 4270 union ccb *ccb; 4271 int size; 4272 4273 ccb = xpt_alloc_ccb_nowait(); 4274 if (ccb == NULL) { 4275 xpt_print(path, "Can't allocate CCB to send %s\n", 4276 xpt_async_string(async_code)); 4277 return; 4278 } 4279 4280 if (xpt_clone_path(&ccb->ccb_h.path, path) != CAM_REQ_CMP) { 4281 xpt_print(path, "Can't allocate path to send %s\n", 4282 xpt_async_string(async_code)); 4283 xpt_free_ccb(ccb); 4284 return; 4285 } 4286 ccb->ccb_h.path->periph = NULL; 4287 ccb->ccb_h.func_code = XPT_ASYNC; 4288 ccb->ccb_h.cbfcnp = xpt_async_process; 4289 ccb->ccb_h.flags |= CAM_UNLOCKED; 4290 ccb->casync.async_code = async_code; 4291 ccb->casync.async_arg_size = 0; 4292 size = xpt_async_size(async_code); 4293 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, 4294 ("xpt_async: func %#x %s aync_code %d %s\n", 4295 ccb->ccb_h.func_code, 4296 xpt_action_name(ccb->ccb_h.func_code), 4297 async_code, 4298 xpt_async_string(async_code))); 4299 if (size > 0 && async_arg != NULL) { 4300 ccb->casync.async_arg_ptr = malloc(size, M_CAMXPT, M_NOWAIT); 4301 if (ccb->casync.async_arg_ptr == NULL) { 4302 xpt_print(path, "Can't allocate argument to send %s\n", 4303 xpt_async_string(async_code)); 4304 xpt_free_path(ccb->ccb_h.path); 4305 xpt_free_ccb(ccb); 4306 return; 4307 } 4308 memcpy(ccb->casync.async_arg_ptr, async_arg, size); 4309 ccb->casync.async_arg_size = size; 4310 } else if (size < 0) { 4311 ccb->casync.async_arg_ptr = async_arg; 4312 ccb->casync.async_arg_size = size; 4313 } 4314 if (path->device != NULL && path->device->lun_id != CAM_LUN_WILDCARD) 4315 xpt_freeze_devq(path, 1); 4316 else 4317 xpt_freeze_simq(path->bus->sim, 1); 4318 xpt_done(ccb); 4319 } 4320 4321 static void 4322 xpt_dev_async_default(u_int32_t async_code, struct cam_eb *bus, 4323 struct cam_et *target, struct cam_ed *device, 4324 void *async_arg) 4325 { 4326 4327 /* 4328 * We only need to handle events for real devices. 
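 * Wildcard target or lun ids identify the per-bus and per-target
 * placeholder nodes rather than actual devices, so they are skipped
 * below.  (The events themselves typically originate from a SIM or
 * from the XPT calling xpt_async() above, e.g.
 * xpt_async(AC_LOST_DEVICE, path, NULL) when a device disappears.)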
4329 */ 4330 if (target->target_id == CAM_TARGET_WILDCARD 4331 || device->lun_id == CAM_LUN_WILDCARD) 4332 return; 4333 4334 printf("%s called\n", __func__); 4335 } 4336 4337 static uint32_t 4338 xpt_freeze_devq_device(struct cam_ed *dev, u_int count) 4339 { 4340 struct cam_devq *devq; 4341 uint32_t freeze; 4342 4343 devq = dev->sim->devq; 4344 mtx_assert(&devq->send_mtx, MA_OWNED); 4345 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4346 ("xpt_freeze_devq_device(%d) %u->%u\n", count, 4347 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count)); 4348 freeze = (dev->ccbq.queue.qfrozen_cnt += count); 4349 /* Remove frozen device from sendq. */ 4350 if (device_is_queued(dev)) 4351 camq_remove(&devq->send_queue, dev->devq_entry.index); 4352 return (freeze); 4353 } 4354 4355 u_int32_t 4356 xpt_freeze_devq(struct cam_path *path, u_int count) 4357 { 4358 struct cam_ed *dev = path->device; 4359 struct cam_devq *devq; 4360 uint32_t freeze; 4361 4362 devq = dev->sim->devq; 4363 mtx_lock(&devq->send_mtx); 4364 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_freeze_devq(%d)\n", count)); 4365 freeze = xpt_freeze_devq_device(dev, count); 4366 mtx_unlock(&devq->send_mtx); 4367 return (freeze); 4368 } 4369 4370 u_int32_t 4371 xpt_freeze_simq(struct cam_sim *sim, u_int count) 4372 { 4373 struct cam_devq *devq; 4374 uint32_t freeze; 4375 4376 devq = sim->devq; 4377 mtx_lock(&devq->send_mtx); 4378 freeze = (devq->send_queue.qfrozen_cnt += count); 4379 mtx_unlock(&devq->send_mtx); 4380 return (freeze); 4381 } 4382 4383 static void 4384 xpt_release_devq_timeout(void *arg) 4385 { 4386 struct cam_ed *dev; 4387 struct cam_devq *devq; 4388 4389 dev = (struct cam_ed *)arg; 4390 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, ("xpt_release_devq_timeout\n")); 4391 devq = dev->sim->devq; 4392 mtx_assert(&devq->send_mtx, MA_OWNED); 4393 if (xpt_release_devq_device(dev, /*count*/1, /*run_queue*/TRUE)) 4394 xpt_run_devq(devq); 4395 } 4396 4397 void 4398 xpt_release_devq(struct cam_path *path, u_int count, int run_queue) 4399 { 4400 struct cam_ed *dev; 4401 struct cam_devq *devq; 4402 4403 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_devq(%d, %d)\n", 4404 count, run_queue)); 4405 dev = path->device; 4406 devq = dev->sim->devq; 4407 mtx_lock(&devq->send_mtx); 4408 if (xpt_release_devq_device(dev, count, run_queue)) 4409 xpt_run_devq(dev->sim->devq); 4410 mtx_unlock(&devq->send_mtx); 4411 } 4412 4413 static int 4414 xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue) 4415 { 4416 4417 mtx_assert(&dev->sim->devq->send_mtx, MA_OWNED); 4418 CAM_DEBUG_DEV(dev, CAM_DEBUG_TRACE, 4419 ("xpt_release_devq_device(%d, %d) %u->%u\n", count, run_queue, 4420 dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count)); 4421 if (count > dev->ccbq.queue.qfrozen_cnt) { 4422 #ifdef INVARIANTS 4423 printf("xpt_release_devq(): requested %u > present %u\n", 4424 count, dev->ccbq.queue.qfrozen_cnt); 4425 #endif 4426 count = dev->ccbq.queue.qfrozen_cnt; 4427 } 4428 dev->ccbq.queue.qfrozen_cnt -= count; 4429 if (dev->ccbq.queue.qfrozen_cnt == 0) { 4430 /* 4431 * No longer need to wait for a successful 4432 * command completion. 4433 */ 4434 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 4435 /* 4436 * Remove any timeouts that might be scheduled 4437 * to release this queue. 4438 */ 4439 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) { 4440 callout_stop(&dev->callout); 4441 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING; 4442 } 4443 /* 4444 * Now that we are unfrozen schedule the 4445 * device so any pending transactions are 4446 * run. 
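 *
 * (Freeze and release calls nest by count.  A typical pattern for an
 * external caller, sketched here with the path variable assumed to
 * reference the device being recovered, is:
 *
 *	xpt_freeze_devq(path, 1);
 *	... perform recovery, adjust transfer settings, etc ...
 *	xpt_release_devq(path, 1, TRUE);
 *
 * where the final TRUE requests that the send queue be run again
 * once the freeze count drops to zero.)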
4447 */ 4448 xpt_schedule_devq(dev->sim->devq, dev); 4449 } else 4450 run_queue = 0; 4451 return (run_queue); 4452 } 4453 4454 void 4455 xpt_release_simq(struct cam_sim *sim, int run_queue) 4456 { 4457 struct cam_devq *devq; 4458 4459 devq = sim->devq; 4460 mtx_lock(&devq->send_mtx); 4461 if (devq->send_queue.qfrozen_cnt <= 0) { 4462 #ifdef INVARIANTS 4463 printf("xpt_release_simq: requested 1 > present %u\n", 4464 devq->send_queue.qfrozen_cnt); 4465 #endif 4466 } else 4467 devq->send_queue.qfrozen_cnt--; 4468 if (devq->send_queue.qfrozen_cnt == 0) { 4469 /* 4470 * If there is a timeout scheduled to release this 4471 * sim queue, remove it. The queue frozen count is 4472 * already at 0. 4473 */ 4474 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){ 4475 callout_stop(&sim->callout); 4476 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING; 4477 } 4478 if (run_queue) { 4479 /* 4480 * Now that we are unfrozen run the send queue. 4481 */ 4482 xpt_run_devq(sim->devq); 4483 } 4484 } 4485 mtx_unlock(&devq->send_mtx); 4486 } 4487 4488 /* 4489 * XXX Appears to be unused. 4490 */ 4491 static void 4492 xpt_release_simq_timeout(void *arg) 4493 { 4494 struct cam_sim *sim; 4495 4496 sim = (struct cam_sim *)arg; 4497 xpt_release_simq(sim, /* run_queue */ TRUE); 4498 } 4499 4500 void 4501 xpt_done(union ccb *done_ccb) 4502 { 4503 struct cam_doneq *queue; 4504 int run, hash; 4505 4506 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 4507 if (done_ccb->ccb_h.func_code == XPT_SCSI_IO && 4508 done_ccb->csio.bio != NULL) 4509 biotrack(done_ccb->csio.bio, __func__); 4510 #endif 4511 4512 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4513 ("xpt_done: func= %#x %s status %#x\n", 4514 done_ccb->ccb_h.func_code, 4515 xpt_action_name(done_ccb->ccb_h.func_code), 4516 done_ccb->ccb_h.status)); 4517 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4518 return; 4519 4520 /* Store the time the ccb was in the sim */ 4521 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4522 hash = (done_ccb->ccb_h.path_id + done_ccb->ccb_h.target_id + 4523 done_ccb->ccb_h.target_lun) % cam_num_doneqs; 4524 queue = &cam_doneqs[hash]; 4525 mtx_lock(&queue->cam_doneq_mtx); 4526 run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq)); 4527 STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe); 4528 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX; 4529 mtx_unlock(&queue->cam_doneq_mtx); 4530 if (run) 4531 wakeup(&queue->cam_doneq); 4532 } 4533 4534 void 4535 xpt_done_direct(union ccb *done_ccb) 4536 { 4537 4538 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, 4539 ("xpt_done_direct: status %#x\n", done_ccb->ccb_h.status)); 4540 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0) 4541 return; 4542 4543 /* Store the time the ccb was in the sim */ 4544 done_ccb->ccb_h.qos.sim_data = sbinuptime() - done_ccb->ccb_h.qos.sim_data; 4545 xpt_done_process(&done_ccb->ccb_h); 4546 } 4547 4548 union ccb * 4549 xpt_alloc_ccb() 4550 { 4551 union ccb *new_ccb; 4552 4553 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4554 return (new_ccb); 4555 } 4556 4557 union ccb * 4558 xpt_alloc_ccb_nowait() 4559 { 4560 union ccb *new_ccb; 4561 4562 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4563 return (new_ccb); 4564 } 4565 4566 void 4567 xpt_free_ccb(union ccb *free_ccb) 4568 { 4569 free(free_ccb, M_CAMCCB); 4570 } 4571 4572 4573 4574 /* Private XPT functions */ 4575 4576 /* 4577 * Get a CAM control block for the caller. 
Charge the structure to the device 4578 * referenced by the path. If we don't have sufficient resources to allocate 4579 * more ccbs, we return NULL. 4580 */ 4581 static union ccb * 4582 xpt_get_ccb_nowait(struct cam_periph *periph) 4583 { 4584 union ccb *new_ccb; 4585 4586 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_NOWAIT); 4587 if (new_ccb == NULL) 4588 return (NULL); 4589 periph->periph_allocated++; 4590 cam_ccbq_take_opening(&periph->path->device->ccbq); 4591 return (new_ccb); 4592 } 4593 4594 static union ccb * 4595 xpt_get_ccb(struct cam_periph *periph) 4596 { 4597 union ccb *new_ccb; 4598 4599 cam_periph_unlock(periph); 4600 new_ccb = malloc(sizeof(*new_ccb), M_CAMCCB, M_ZERO|M_WAITOK); 4601 cam_periph_lock(periph); 4602 periph->periph_allocated++; 4603 cam_ccbq_take_opening(&periph->path->device->ccbq); 4604 return (new_ccb); 4605 } 4606 4607 union ccb * 4608 cam_periph_getccb(struct cam_periph *periph, u_int32_t priority) 4609 { 4610 struct ccb_hdr *ccb_h; 4611 4612 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("cam_periph_getccb\n")); 4613 cam_periph_assert(periph, MA_OWNED); 4614 while ((ccb_h = SLIST_FIRST(&periph->ccb_list)) == NULL || 4615 ccb_h->pinfo.priority != priority) { 4616 if (priority < periph->immediate_priority) { 4617 periph->immediate_priority = priority; 4618 xpt_run_allocq(periph, 0); 4619 } else 4620 cam_periph_sleep(periph, &periph->ccb_list, PRIBIO, 4621 "cgticb", 0); 4622 } 4623 SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle); 4624 return ((union ccb *)ccb_h); 4625 } 4626 4627 static void 4628 xpt_acquire_bus(struct cam_eb *bus) 4629 { 4630 4631 xpt_lock_buses(); 4632 bus->refcount++; 4633 xpt_unlock_buses(); 4634 } 4635 4636 static void 4637 xpt_release_bus(struct cam_eb *bus) 4638 { 4639 4640 xpt_lock_buses(); 4641 KASSERT(bus->refcount >= 1, ("bus->refcount >= 1")); 4642 if (--bus->refcount > 0) { 4643 xpt_unlock_buses(); 4644 return; 4645 } 4646 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links); 4647 xsoftc.bus_generation++; 4648 xpt_unlock_buses(); 4649 KASSERT(TAILQ_EMPTY(&bus->et_entries), 4650 ("destroying bus, but target list is not empty")); 4651 cam_sim_release(bus->sim); 4652 mtx_destroy(&bus->eb_mtx); 4653 free(bus, M_CAMXPT); 4654 } 4655 4656 static struct cam_et * 4657 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id) 4658 { 4659 struct cam_et *cur_target, *target; 4660 4661 mtx_assert(&xsoftc.xpt_topo_lock, MA_OWNED); 4662 mtx_assert(&bus->eb_mtx, MA_OWNED); 4663 target = (struct cam_et *)malloc(sizeof(*target), M_CAMXPT, 4664 M_NOWAIT|M_ZERO); 4665 if (target == NULL) 4666 return (NULL); 4667 4668 TAILQ_INIT(&target->ed_entries); 4669 target->bus = bus; 4670 target->target_id = target_id; 4671 target->refcount = 1; 4672 target->generation = 0; 4673 target->luns = NULL; 4674 mtx_init(&target->luns_mtx, "CAM LUNs lock", NULL, MTX_DEF); 4675 timevalclear(&target->last_reset); 4676 /* 4677 * Hold a reference to our parent bus so it 4678 * will not go away before we do. 
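 * (Each layer of the topology pins its parent this way: a device
 * takes a reference on its target in xpt_alloc_device(), a target
 * takes one on its bus here, and the references are dropped again by
 * xpt_release_device() and xpt_release_target() respectively.)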
4679 */ 4680 bus->refcount++; 4681 4682 /* Insertion sort into our bus's target list */ 4683 cur_target = TAILQ_FIRST(&bus->et_entries); 4684 while (cur_target != NULL && cur_target->target_id < target_id) 4685 cur_target = TAILQ_NEXT(cur_target, links); 4686 if (cur_target != NULL) { 4687 TAILQ_INSERT_BEFORE(cur_target, target, links); 4688 } else { 4689 TAILQ_INSERT_TAIL(&bus->et_entries, target, links); 4690 } 4691 bus->generation++; 4692 return (target); 4693 } 4694 4695 static void 4696 xpt_acquire_target(struct cam_et *target) 4697 { 4698 struct cam_eb *bus = target->bus; 4699 4700 mtx_lock(&bus->eb_mtx); 4701 target->refcount++; 4702 mtx_unlock(&bus->eb_mtx); 4703 } 4704 4705 static void 4706 xpt_release_target(struct cam_et *target) 4707 { 4708 struct cam_eb *bus = target->bus; 4709 4710 mtx_lock(&bus->eb_mtx); 4711 if (--target->refcount > 0) { 4712 mtx_unlock(&bus->eb_mtx); 4713 return; 4714 } 4715 TAILQ_REMOVE(&bus->et_entries, target, links); 4716 bus->generation++; 4717 mtx_unlock(&bus->eb_mtx); 4718 KASSERT(TAILQ_EMPTY(&target->ed_entries), 4719 ("destroying target, but device list is not empty")); 4720 xpt_release_bus(bus); 4721 mtx_destroy(&target->luns_mtx); 4722 if (target->luns) 4723 free(target->luns, M_CAMXPT); 4724 free(target, M_CAMXPT); 4725 } 4726 4727 static struct cam_ed * 4728 xpt_alloc_device_default(struct cam_eb *bus, struct cam_et *target, 4729 lun_id_t lun_id) 4730 { 4731 struct cam_ed *device; 4732 4733 device = xpt_alloc_device(bus, target, lun_id); 4734 if (device == NULL) 4735 return (NULL); 4736 4737 device->mintags = 1; 4738 device->maxtags = 1; 4739 return (device); 4740 } 4741 4742 static void 4743 xpt_destroy_device(void *context, int pending) 4744 { 4745 struct cam_ed *device = context; 4746 4747 mtx_lock(&device->device_mtx); 4748 mtx_destroy(&device->device_mtx); 4749 free(device, M_CAMDEV); 4750 } 4751 4752 struct cam_ed * 4753 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id) 4754 { 4755 struct cam_ed *cur_device, *device; 4756 struct cam_devq *devq; 4757 cam_status status; 4758 4759 mtx_assert(&bus->eb_mtx, MA_OWNED); 4760 /* Make space for us in the device queue on our bus */ 4761 devq = bus->sim->devq; 4762 mtx_lock(&devq->send_mtx); 4763 status = cam_devq_resize(devq, devq->send_queue.array_size + 1); 4764 mtx_unlock(&devq->send_mtx); 4765 if (status != CAM_REQ_CMP) 4766 return (NULL); 4767 4768 device = (struct cam_ed *)malloc(sizeof(*device), 4769 M_CAMDEV, M_NOWAIT|M_ZERO); 4770 if (device == NULL) 4771 return (NULL); 4772 4773 cam_init_pinfo(&device->devq_entry); 4774 device->target = target; 4775 device->lun_id = lun_id; 4776 device->sim = bus->sim; 4777 if (cam_ccbq_init(&device->ccbq, 4778 bus->sim->max_dev_openings) != 0) { 4779 free(device, M_CAMDEV); 4780 return (NULL); 4781 } 4782 SLIST_INIT(&device->asyncs); 4783 SLIST_INIT(&device->periphs); 4784 device->generation = 0; 4785 device->flags = CAM_DEV_UNCONFIGURED; 4786 device->tag_delay_count = 0; 4787 device->tag_saved_openings = 0; 4788 device->refcount = 1; 4789 mtx_init(&device->device_mtx, "CAM device lock", NULL, MTX_DEF); 4790 callout_init_mtx(&device->callout, &devq->send_mtx, 0); 4791 TASK_INIT(&device->device_destroy_task, 0, xpt_destroy_device, device); 4792 /* 4793 * Hold a reference to our parent bus so it 4794 * will not go away before we do. 
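 * (Strictly speaking, the reference taken below is on the parent
 * target rather than the bus; the target in turn holds its bus, and
 * the reference is dropped again by xpt_release_device() via
 * xpt_release_target().)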
4795 */ 4796 target->refcount++; 4797 4798 cur_device = TAILQ_FIRST(&target->ed_entries); 4799 while (cur_device != NULL && cur_device->lun_id < lun_id) 4800 cur_device = TAILQ_NEXT(cur_device, links); 4801 if (cur_device != NULL) 4802 TAILQ_INSERT_BEFORE(cur_device, device, links); 4803 else 4804 TAILQ_INSERT_TAIL(&target->ed_entries, device, links); 4805 target->generation++; 4806 return (device); 4807 } 4808 4809 void 4810 xpt_acquire_device(struct cam_ed *device) 4811 { 4812 struct cam_eb *bus = device->target->bus; 4813 4814 mtx_lock(&bus->eb_mtx); 4815 device->refcount++; 4816 mtx_unlock(&bus->eb_mtx); 4817 } 4818 4819 void 4820 xpt_release_device(struct cam_ed *device) 4821 { 4822 struct cam_eb *bus = device->target->bus; 4823 struct cam_devq *devq; 4824 4825 mtx_lock(&bus->eb_mtx); 4826 if (--device->refcount > 0) { 4827 mtx_unlock(&bus->eb_mtx); 4828 return; 4829 } 4830 4831 TAILQ_REMOVE(&device->target->ed_entries, device,links); 4832 device->target->generation++; 4833 mtx_unlock(&bus->eb_mtx); 4834 4835 /* Release our slot in the devq */ 4836 devq = bus->sim->devq; 4837 mtx_lock(&devq->send_mtx); 4838 cam_devq_resize(devq, devq->send_queue.array_size - 1); 4839 mtx_unlock(&devq->send_mtx); 4840 4841 KASSERT(SLIST_EMPTY(&device->periphs), 4842 ("destroying device, but periphs list is not empty")); 4843 KASSERT(device->devq_entry.index == CAM_UNQUEUED_INDEX, 4844 ("destroying device while still queued for ccbs")); 4845 4846 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) 4847 callout_stop(&device->callout); 4848 4849 xpt_release_target(device->target); 4850 4851 cam_ccbq_fini(&device->ccbq); 4852 /* 4853 * Free allocated memory. free(9) does nothing if the 4854 * supplied pointer is NULL, so it is safe to call without 4855 * checking. 
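 * The cam_ed structure itself is not freed here; its destruction is
 * deferred to xpt_destroy_device() through the xpt taskqueue (see
 * device_destroy_task above), which tears down the device mutex
 * before releasing the memory.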
4856 */ 4857 free(device->supported_vpds, M_CAMXPT); 4858 free(device->device_id, M_CAMXPT); 4859 free(device->ext_inq, M_CAMXPT); 4860 free(device->physpath, M_CAMXPT); 4861 free(device->rcap_buf, M_CAMXPT); 4862 free(device->serial_num, M_CAMXPT); 4863 taskqueue_enqueue(xsoftc.xpt_taskq, &device->device_destroy_task); 4864 } 4865 4866 u_int32_t 4867 xpt_dev_ccbq_resize(struct cam_path *path, int newopenings) 4868 { 4869 int result; 4870 struct cam_ed *dev; 4871 4872 dev = path->device; 4873 mtx_lock(&dev->sim->devq->send_mtx); 4874 result = cam_ccbq_resize(&dev->ccbq, newopenings); 4875 mtx_unlock(&dev->sim->devq->send_mtx); 4876 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 4877 || (dev->inq_flags & SID_CmdQue) != 0) 4878 dev->tag_saved_openings = newopenings; 4879 return (result); 4880 } 4881 4882 static struct cam_eb * 4883 xpt_find_bus(path_id_t path_id) 4884 { 4885 struct cam_eb *bus; 4886 4887 xpt_lock_buses(); 4888 for (bus = TAILQ_FIRST(&xsoftc.xpt_busses); 4889 bus != NULL; 4890 bus = TAILQ_NEXT(bus, links)) { 4891 if (bus->path_id == path_id) { 4892 bus->refcount++; 4893 break; 4894 } 4895 } 4896 xpt_unlock_buses(); 4897 return (bus); 4898 } 4899 4900 static struct cam_et * 4901 xpt_find_target(struct cam_eb *bus, target_id_t target_id) 4902 { 4903 struct cam_et *target; 4904 4905 mtx_assert(&bus->eb_mtx, MA_OWNED); 4906 for (target = TAILQ_FIRST(&bus->et_entries); 4907 target != NULL; 4908 target = TAILQ_NEXT(target, links)) { 4909 if (target->target_id == target_id) { 4910 target->refcount++; 4911 break; 4912 } 4913 } 4914 return (target); 4915 } 4916 4917 static struct cam_ed * 4918 xpt_find_device(struct cam_et *target, lun_id_t lun_id) 4919 { 4920 struct cam_ed *device; 4921 4922 mtx_assert(&target->bus->eb_mtx, MA_OWNED); 4923 for (device = TAILQ_FIRST(&target->ed_entries); 4924 device != NULL; 4925 device = TAILQ_NEXT(device, links)) { 4926 if (device->lun_id == lun_id) { 4927 device->refcount++; 4928 break; 4929 } 4930 } 4931 return (device); 4932 } 4933 4934 void 4935 xpt_start_tags(struct cam_path *path) 4936 { 4937 struct ccb_relsim crs; 4938 struct cam_ed *device; 4939 struct cam_sim *sim; 4940 int newopenings; 4941 4942 device = path->device; 4943 sim = path->bus->sim; 4944 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4945 xpt_freeze_devq(path, /*count*/1); 4946 device->inq_flags |= SID_CmdQue; 4947 if (device->tag_saved_openings != 0) 4948 newopenings = device->tag_saved_openings; 4949 else 4950 newopenings = min(device->maxtags, 4951 sim->max_tagged_dev_openings); 4952 xpt_dev_ccbq_resize(path, newopenings); 4953 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4954 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4955 crs.ccb_h.func_code = XPT_REL_SIMQ; 4956 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4957 crs.openings 4958 = crs.release_timeout 4959 = crs.qfrozen_cnt 4960 = 0; 4961 xpt_action((union ccb *)&crs); 4962 } 4963 4964 void 4965 xpt_stop_tags(struct cam_path *path) 4966 { 4967 struct ccb_relsim crs; 4968 struct cam_ed *device; 4969 struct cam_sim *sim; 4970 4971 device = path->device; 4972 sim = path->bus->sim; 4973 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT; 4974 device->tag_delay_count = 0; 4975 xpt_freeze_devq(path, /*count*/1); 4976 device->inq_flags &= ~SID_CmdQue; 4977 xpt_dev_ccbq_resize(path, sim->max_dev_openings); 4978 xpt_async(AC_GETDEV_CHANGED, path, NULL); 4979 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 4980 crs.ccb_h.func_code = XPT_REL_SIMQ; 4981 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY; 4982 crs.openings 4983 = 
crs.release_timeout 4984 = crs.qfrozen_cnt 4985 = 0; 4986 xpt_action((union ccb *)&crs); 4987 } 4988 4989 static void 4990 xpt_boot_delay(void *arg) 4991 { 4992 4993 xpt_release_boot(); 4994 } 4995 4996 static void 4997 xpt_config(void *arg) 4998 { 4999 /* 5000 * Now that interrupts are enabled, go find our devices 5001 */ 5002 if (taskqueue_start_threads(&xsoftc.xpt_taskq, 1, PRIBIO, "CAM taskq")) 5003 printf("xpt_config: failed to create taskqueue thread.\n"); 5004 5005 /* Setup debugging path */ 5006 if (cam_dflags != CAM_DEBUG_NONE) { 5007 if (xpt_create_path(&cam_dpath, NULL, 5008 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, 5009 CAM_DEBUG_LUN) != CAM_REQ_CMP) { 5010 printf("xpt_config: xpt_create_path() failed for debug" 5011 " target %d:%d:%d, debugging disabled\n", 5012 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN); 5013 cam_dflags = CAM_DEBUG_NONE; 5014 } 5015 } else 5016 cam_dpath = NULL; 5017 5018 periphdriver_init(1); 5019 xpt_hold_boot(); 5020 callout_init(&xsoftc.boot_callout, 1); 5021 callout_reset_sbt(&xsoftc.boot_callout, SBT_1MS * xsoftc.boot_delay, 0, 5022 xpt_boot_delay, NULL, 0); 5023 /* Fire up rescan thread. */ 5024 if (kproc_kthread_add(xpt_scanner_thread, NULL, &cam_proc, NULL, 0, 0, 5025 "cam", "scanner")) { 5026 printf("xpt_config: failed to create rescan thread.\n"); 5027 } 5028 } 5029 5030 void 5031 xpt_hold_boot(void) 5032 { 5033 xpt_lock_buses(); 5034 xsoftc.buses_to_config++; 5035 xpt_unlock_buses(); 5036 } 5037 5038 void 5039 xpt_release_boot(void) 5040 { 5041 xpt_lock_buses(); 5042 xsoftc.buses_to_config--; 5043 if (xsoftc.buses_to_config == 0 && xsoftc.buses_config_done == 0) { 5044 struct xpt_task *task; 5045 5046 xsoftc.buses_config_done = 1; 5047 xpt_unlock_buses(); 5048 /* Call manually because we don't have any buses */ 5049 task = malloc(sizeof(struct xpt_task), M_CAMXPT, M_NOWAIT); 5050 if (task != NULL) { 5051 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task); 5052 taskqueue_enqueue(taskqueue_thread, &task->task); 5053 } 5054 } else 5055 xpt_unlock_buses(); 5056 } 5057 5058 /* 5059 * If the given device only has one peripheral attached to it, and if that 5060 * peripheral is the passthrough driver, announce it. This insures that the 5061 * user sees some sort of announcement for every peripheral in their system. 5062 */ 5063 static int 5064 xptpassannouncefunc(struct cam_ed *device, void *arg) 5065 { 5066 struct cam_periph *periph; 5067 int i; 5068 5069 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL; 5070 periph = SLIST_NEXT(periph, periph_links), i++); 5071 5072 periph = SLIST_FIRST(&device->periphs); 5073 if ((i == 1) 5074 && (strncmp(periph->periph_name, "pass", 4) == 0)) 5075 xpt_announce_periph(periph, NULL); 5076 5077 return(1); 5078 } 5079 5080 static void 5081 xpt_finishconfig_task(void *context, int pending) 5082 { 5083 5084 periphdriver_init(2); 5085 /* 5086 * Check for devices with no "standard" peripheral driver 5087 * attached. For any devices like that, announce the 5088 * passthrough driver so the user will see something. 5089 */ 5090 if (!bootverbose) 5091 xpt_for_all_devices(xptpassannouncefunc, NULL); 5092 5093 /* Release our hook so that the boot can continue. 
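 * (The hook was established with config_intrhook_establish() during
 * XPT initialization; the kernel defers mounting the root file
 * system while such hooks are outstanding, so dropping it here
 * signals that the initial bus scans have finished.)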
*/ 5094 config_intrhook_disestablish(xsoftc.xpt_config_hook); 5095 free(xsoftc.xpt_config_hook, M_CAMXPT); 5096 xsoftc.xpt_config_hook = NULL; 5097 5098 free(context, M_CAMXPT); 5099 } 5100 5101 cam_status 5102 xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg, 5103 struct cam_path *path) 5104 { 5105 struct ccb_setasync csa; 5106 cam_status status; 5107 int xptpath = 0; 5108 5109 if (path == NULL) { 5110 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 5111 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 5112 if (status != CAM_REQ_CMP) 5113 return (status); 5114 xpt_path_lock(path); 5115 xptpath = 1; 5116 } 5117 5118 xpt_setup_ccb(&csa.ccb_h, path, CAM_PRIORITY_NORMAL); 5119 csa.ccb_h.func_code = XPT_SASYNC_CB; 5120 csa.event_enable = event; 5121 csa.callback = cbfunc; 5122 csa.callback_arg = cbarg; 5123 xpt_action((union ccb *)&csa); 5124 status = csa.ccb_h.status; 5125 5126 CAM_DEBUG(csa.ccb_h.path, CAM_DEBUG_TRACE, 5127 ("xpt_register_async: func %p\n", cbfunc)); 5128 5129 if (xptpath) { 5130 xpt_path_unlock(path); 5131 xpt_free_path(path); 5132 } 5133 5134 if ((status == CAM_REQ_CMP) && 5135 (csa.event_enable & AC_FOUND_DEVICE)) { 5136 /* 5137 * Get this peripheral up to date with all 5138 * the currently existing devices. 5139 */ 5140 xpt_for_all_devices(xptsetasyncfunc, &csa); 5141 } 5142 if ((status == CAM_REQ_CMP) && 5143 (csa.event_enable & AC_PATH_REGISTERED)) { 5144 /* 5145 * Get this peripheral up to date with all 5146 * the currently existing buses. 5147 */ 5148 xpt_for_all_busses(xptsetasyncbusfunc, &csa); 5149 } 5150 5151 return (status); 5152 } 5153 5154 static void 5155 xptaction(struct cam_sim *sim, union ccb *work_ccb) 5156 { 5157 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n")); 5158 5159 switch (work_ccb->ccb_h.func_code) { 5160 /* Common cases first */ 5161 case XPT_PATH_INQ: /* Path routing inquiry */ 5162 { 5163 struct ccb_pathinq *cpi; 5164 5165 cpi = &work_ccb->cpi; 5166 cpi->version_num = 1; /* XXX??? */ 5167 cpi->hba_inquiry = 0; 5168 cpi->target_sprt = 0; 5169 cpi->hba_misc = 0; 5170 cpi->hba_eng_cnt = 0; 5171 cpi->max_target = 0; 5172 cpi->max_lun = 0; 5173 cpi->initiator_id = 0; 5174 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 5175 strlcpy(cpi->hba_vid, "", HBA_IDLEN); 5176 strlcpy(cpi->dev_name, sim->sim_name, DEV_IDLEN); 5177 cpi->unit_number = sim->unit_number; 5178 cpi->bus_id = sim->bus_id; 5179 cpi->base_transfer_speed = 0; 5180 cpi->protocol = PROTO_UNSPECIFIED; 5181 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 5182 cpi->transport = XPORT_UNSPECIFIED; 5183 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 5184 cpi->ccb_h.status = CAM_REQ_CMP; 5185 xpt_done(work_ccb); 5186 break; 5187 } 5188 default: 5189 work_ccb->ccb_h.status = CAM_REQ_INVALID; 5190 xpt_done(work_ccb); 5191 break; 5192 } 5193 } 5194 5195 /* 5196 * The xpt as a "controller" has no interrupt sources, so polling 5197 * is a no-op. 
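 * Real SIMs, by contrast, provide a poll entry point that runs their
 * interrupt handler by hand so that outstanding CCBs can still
 * complete when interrupts cannot be relied upon, e.g. while
 * dumping core.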
5198 */ 5199 static void 5200 xptpoll(struct cam_sim *sim) 5201 { 5202 } 5203 5204 void 5205 xpt_lock_buses(void) 5206 { 5207 mtx_lock(&xsoftc.xpt_topo_lock); 5208 } 5209 5210 void 5211 xpt_unlock_buses(void) 5212 { 5213 mtx_unlock(&xsoftc.xpt_topo_lock); 5214 } 5215 5216 struct mtx * 5217 xpt_path_mtx(struct cam_path *path) 5218 { 5219 5220 return (&path->device->device_mtx); 5221 } 5222 5223 static void 5224 xpt_done_process(struct ccb_hdr *ccb_h) 5225 { 5226 struct cam_sim *sim; 5227 struct cam_devq *devq; 5228 struct mtx *mtx = NULL; 5229 5230 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 5231 struct ccb_scsiio *csio; 5232 5233 if (ccb_h->func_code == XPT_SCSI_IO) { 5234 csio = &((union ccb *)ccb_h)->csio; 5235 if (csio->bio != NULL) 5236 biotrack(csio->bio, __func__); 5237 } 5238 #endif 5239 5240 if (ccb_h->flags & CAM_HIGH_POWER) { 5241 struct highpowerlist *hphead; 5242 struct cam_ed *device; 5243 5244 mtx_lock(&xsoftc.xpt_highpower_lock); 5245 hphead = &xsoftc.highpowerq; 5246 5247 device = STAILQ_FIRST(hphead); 5248 5249 /* 5250 * Increment the count since this command is done. 5251 */ 5252 xsoftc.num_highpower++; 5253 5254 /* 5255 * Any high powered commands queued up? 5256 */ 5257 if (device != NULL) { 5258 5259 STAILQ_REMOVE_HEAD(hphead, highpowerq_entry); 5260 mtx_unlock(&xsoftc.xpt_highpower_lock); 5261 5262 mtx_lock(&device->sim->devq->send_mtx); 5263 xpt_release_devq_device(device, 5264 /*count*/1, /*runqueue*/TRUE); 5265 mtx_unlock(&device->sim->devq->send_mtx); 5266 } else 5267 mtx_unlock(&xsoftc.xpt_highpower_lock); 5268 } 5269 5270 sim = ccb_h->path->bus->sim; 5271 5272 if (ccb_h->status & CAM_RELEASE_SIMQ) { 5273 xpt_release_simq(sim, /*run_queue*/FALSE); 5274 ccb_h->status &= ~CAM_RELEASE_SIMQ; 5275 } 5276 5277 if ((ccb_h->flags & CAM_DEV_QFRZDIS) 5278 && (ccb_h->status & CAM_DEV_QFRZN)) { 5279 xpt_release_devq(ccb_h->path, /*count*/1, /*run_queue*/TRUE); 5280 ccb_h->status &= ~CAM_DEV_QFRZN; 5281 } 5282 5283 devq = sim->devq; 5284 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) { 5285 struct cam_ed *dev = ccb_h->path->device; 5286 5287 mtx_lock(&devq->send_mtx); 5288 devq->send_active--; 5289 devq->send_openings++; 5290 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h); 5291 5292 if (((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0 5293 && (dev->ccbq.dev_active == 0))) { 5294 dev->flags &= ~CAM_DEV_REL_ON_QUEUE_EMPTY; 5295 xpt_release_devq_device(dev, /*count*/1, 5296 /*run_queue*/FALSE); 5297 } 5298 5299 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0 5300 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)) { 5301 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE; 5302 xpt_release_devq_device(dev, /*count*/1, 5303 /*run_queue*/FALSE); 5304 } 5305 5306 if (!device_is_queued(dev)) 5307 (void)xpt_schedule_devq(devq, dev); 5308 xpt_run_devq(devq); 5309 mtx_unlock(&devq->send_mtx); 5310 5311 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0) { 5312 mtx = xpt_path_mtx(ccb_h->path); 5313 mtx_lock(mtx); 5314 5315 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0 5316 && (--dev->tag_delay_count == 0)) 5317 xpt_start_tags(ccb_h->path); 5318 } 5319 } 5320 5321 if ((ccb_h->flags & CAM_UNLOCKED) == 0) { 5322 if (mtx == NULL) { 5323 mtx = xpt_path_mtx(ccb_h->path); 5324 mtx_lock(mtx); 5325 } 5326 } else { 5327 if (mtx != NULL) { 5328 mtx_unlock(mtx); 5329 mtx = NULL; 5330 } 5331 } 5332 5333 /* Call the peripheral driver's callback */ 5334 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX; 5335 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h); 5336 if (mtx != NULL) 5337 
mtx_unlock(mtx); 5338 } 5339 5340 void 5341 xpt_done_td(void *arg) 5342 { 5343 struct cam_doneq *queue = arg; 5344 struct ccb_hdr *ccb_h; 5345 STAILQ_HEAD(, ccb_hdr) doneq; 5346 5347 STAILQ_INIT(&doneq); 5348 mtx_lock(&queue->cam_doneq_mtx); 5349 while (1) { 5350 while (STAILQ_EMPTY(&queue->cam_doneq)) { 5351 queue->cam_doneq_sleep = 1; 5352 msleep(&queue->cam_doneq, &queue->cam_doneq_mtx, 5353 PRIBIO, "-", 0); 5354 queue->cam_doneq_sleep = 0; 5355 } 5356 STAILQ_CONCAT(&doneq, &queue->cam_doneq); 5357 mtx_unlock(&queue->cam_doneq_mtx); 5358 5359 THREAD_NO_SLEEPING(); 5360 while ((ccb_h = STAILQ_FIRST(&doneq)) != NULL) { 5361 STAILQ_REMOVE_HEAD(&doneq, sim_links.stqe); 5362 xpt_done_process(ccb_h); 5363 } 5364 THREAD_SLEEPING_OK(); 5365 5366 mtx_lock(&queue->cam_doneq_mtx); 5367 } 5368 } 5369 5370 static void 5371 camisr_runqueue(void) 5372 { 5373 struct ccb_hdr *ccb_h; 5374 struct cam_doneq *queue; 5375 int i; 5376 5377 /* Process global queues. */ 5378 for (i = 0; i < cam_num_doneqs; i++) { 5379 queue = &cam_doneqs[i]; 5380 mtx_lock(&queue->cam_doneq_mtx); 5381 while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) { 5382 STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe); 5383 mtx_unlock(&queue->cam_doneq_mtx); 5384 xpt_done_process(ccb_h); 5385 mtx_lock(&queue->cam_doneq_mtx); 5386 } 5387 mtx_unlock(&queue->cam_doneq_mtx); 5388 } 5389 } 5390 5391 struct kv 5392 { 5393 uint32_t v; 5394 const char *name; 5395 }; 5396 5397 static struct kv map[] = { 5398 { XPT_NOOP, "XPT_NOOP" }, 5399 { XPT_SCSI_IO, "XPT_SCSI_IO" }, 5400 { XPT_GDEV_TYPE, "XPT_GDEV_TYPE" }, 5401 { XPT_GDEVLIST, "XPT_GDEVLIST" }, 5402 { XPT_PATH_INQ, "XPT_PATH_INQ" }, 5403 { XPT_REL_SIMQ, "XPT_REL_SIMQ" }, 5404 { XPT_SASYNC_CB, "XPT_SASYNC_CB" }, 5405 { XPT_SDEV_TYPE, "XPT_SDEV_TYPE" }, 5406 { XPT_SCAN_BUS, "XPT_SCAN_BUS" }, 5407 { XPT_DEV_MATCH, "XPT_DEV_MATCH" }, 5408 { XPT_DEBUG, "XPT_DEBUG" }, 5409 { XPT_PATH_STATS, "XPT_PATH_STATS" }, 5410 { XPT_GDEV_STATS, "XPT_GDEV_STATS" }, 5411 { XPT_DEV_ADVINFO, "XPT_DEV_ADVINFO" }, 5412 { XPT_ASYNC, "XPT_ASYNC" }, 5413 { XPT_ABORT, "XPT_ABORT" }, 5414 { XPT_RESET_BUS, "XPT_RESET_BUS" }, 5415 { XPT_RESET_DEV, "XPT_RESET_DEV" }, 5416 { XPT_TERM_IO, "XPT_TERM_IO" }, 5417 { XPT_SCAN_LUN, "XPT_SCAN_LUN" }, 5418 { XPT_GET_TRAN_SETTINGS, "XPT_GET_TRAN_SETTINGS" }, 5419 { XPT_SET_TRAN_SETTINGS, "XPT_SET_TRAN_SETTINGS" }, 5420 { XPT_CALC_GEOMETRY, "XPT_CALC_GEOMETRY" }, 5421 { XPT_ATA_IO, "XPT_ATA_IO" }, 5422 { XPT_GET_SIM_KNOB, "XPT_GET_SIM_KNOB" }, 5423 { XPT_SET_SIM_KNOB, "XPT_SET_SIM_KNOB" }, 5424 { XPT_NVME_IO, "XPT_NVME_IO" }, 5425 { XPT_MMCSD_IO, "XPT_MMCSD_IO" }, 5426 { XPT_SMP_IO, "XPT_SMP_IO" }, 5427 { XPT_SCAN_TGT, "XPT_SCAN_TGT" }, 5428 { XPT_ENG_INQ, "XPT_ENG_INQ" }, 5429 { XPT_ENG_EXEC, "XPT_ENG_EXEC" }, 5430 { XPT_EN_LUN, "XPT_EN_LUN" }, 5431 { XPT_TARGET_IO, "XPT_TARGET_IO" }, 5432 { XPT_ACCEPT_TARGET_IO, "XPT_ACCEPT_TARGET_IO" }, 5433 { XPT_CONT_TARGET_IO, "XPT_CONT_TARGET_IO" }, 5434 { XPT_IMMED_NOTIFY, "XPT_IMMED_NOTIFY" }, 5435 { XPT_NOTIFY_ACK, "XPT_NOTIFY_ACK" }, 5436 { XPT_IMMEDIATE_NOTIFY, "XPT_IMMEDIATE_NOTIFY" }, 5437 { XPT_NOTIFY_ACKNOWLEDGE, "XPT_NOTIFY_ACKNOWLEDGE" }, 5438 { 0, 0 } 5439 }; 5440 5441 static const char * 5442 xpt_action_name(uint32_t action) 5443 { 5444 static char buffer[32]; /* Only for unknown messages -- racy */ 5445 struct kv *walker = map; 5446 5447 while (walker->name != NULL) { 5448 if (walker->v == action) 5449 return (walker->name); 5450 walker++; 5451 } 5452 5453 snprintf(buffer, sizeof(buffer), "%#x", action); 5454 return 
(buffer); 5455 } 5456
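/*
 * Illustrative example (not part of the original source): a minimal
 * sketch of how a CAM client might use xpt_register_async() above to
 * watch devices come and go.  The example_* names are hypothetical
 * and the block is excluded from the build.
 */
#if 0
static void
example_async(void *callback_arg, u_int32_t code, struct cam_path *path,
    void *arg)
{

	switch (code) {
	case AC_FOUND_DEVICE: {
		/* For AC_FOUND_DEVICE the argument is a ccb_getdev. */
		struct ccb_getdev *cgd = arg;

		if (cgd != NULL)
			xpt_print(path, "new device, protocol %d\n",
			    cgd->protocol);
		break;
	}
	case AC_LOST_DEVICE:
		xpt_print(path, "device went away\n");
		break;
	default:
		break;
	}
}

static void
example_register(void)
{

	/*
	 * A NULL path registers on the XPT wildcard device, so the
	 * callback sees events for every bus, target and lun.
	 */
	(void)xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
	    example_async, NULL, NULL);
}
#endif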