/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/devctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_compat.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_internal.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int camperiphnextunit(struct periph_driver *p_drv,
		u_int newunit, bool wired,
		path_id_t pathid, target_id_t target,
		lun_id_t lun);
static u_int camperiphunit(struct periph_driver *p_drv,
		path_id_t pathid, target_id_t target,
		lun_id_t lun,
		const char *sn);
static void camperiphdone(struct cam_periph *periph,
		union ccb *done_ccb);
static void camperiphfree(struct cam_periph *periph);
static int camperiphscsistatuserror(union ccb *ccb,
		union ccb **orig_ccb,
		cam_flags camflags,
		uint32_t sense_flags,
		int *openings,
		uint32_t *relsim_flags,
		uint32_t *timeout,
		uint32_t *action,
		const char **action_string);
static int camperiphscsisenseerror(union ccb *ccb,
		union ccb **orig_ccb,
		cam_flags camflags,
		uint32_t sense_flags,
		int *openings,
		uint32_t *relsim_flags,
		uint32_t *timeout,
		uint32_t *action,
		const char **action_string);
static void cam_periph_devctl_notify(union ccb *ccb);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

static u_int periph_mapmem_thresh = 65536;
SYSCTL_UINT(_kern_cam, OID_AUTO, mapmem_thresh, CTLFLAG_RWTUN,
    &periph_mapmem_thresh, 0, "Threshold for user-space buffer mapping");

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
	    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		    sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}

int
periphdriver_unregister(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	int error, n;

	/* If driver marked as early or it is late now, deinitialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1) {
		if (drv->deinit == NULL) {
			printf("CAM periph driver '%s' doesn't have deinit.\n",
			    drv->driver_name);
			return (EOPNOTSUPP);
		}
		error = drv->deinit();
		if (error != 0)
			return (error);
	}

	xpt_lock_buses();
	for (n = 0; n < nperiph_drivers && periph_drivers[n] != drv; n++)
		;
	KASSERT(n < nperiph_drivers,
	    ("Periph driver '%s' was not registered", drv->driver_name));
	for (; n + 1 < nperiph_drivers; n++)
		periph_drivers[n] = periph_drivers[n + 1];
	periph_drivers[n + 1] = NULL;
	nperiph_drivers--;
	xpt_unlock_buses();
	return (0);
}

void
periphdriver_init(int level)
{
	int i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
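
/*
 * Illustrative sketch (assumed consumer, not part of this file): a
 * peripheral driver normally never calls periphdriver_register()
 * directly; it declares itself with PERIPHDRIVER_DECLARE() from
 * <cam/cam_periph.h>, which hooks registration into SYSINIT.  For a
 * hypothetical "foo" driver:
 *
 *	static periph_init_t fooinit;
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */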

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			    "valid device %s%d rejected flags %#x "
			    "refcount %d\n", periph->periph_name,
			    periph->unit_number, periph->flags,
			    periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
	    M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id,
	    path->device->serial_num);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}
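
/*
 * Illustrative sketch (assumed consumer, not part of this file): the
 * usual caller of cam_periph_alloc() is a driver's async callback,
 * invoked when the transport reports a new device, e.g. for the
 * hypothetical "foo" driver:
 *
 *	case AC_FOUND_DEVICE:
 *		...
 *		status = cam_periph_alloc(fooregister, foooninvalidate,
 *		    foocleanup, foostart, "foo", CAM_PERIPH_BIO, path,
 *		    fooasync, AC_FOUND_DEVICE, cgd);
 *		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
 *			printf("fooasync: Unable to attach new device\n");
 *		break;
 */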

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				cam_periph_assert(periph, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
			    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	if (sbuf_len(sb) != 0)
		sbuf_cat(sb, ",");
	sbuf_cat(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

int
cam_periph_acquire(struct cam_periph *periph)
{
	int status;

	if (periph == NULL)
		return (EINVAL);

	status = ENOENT;
	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = 0;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
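
/*
 * Illustrative sketch (assumed consumer, not part of this file): a
 * periph's open routine typically combines the reference counting
 * above with the hold/unhold exclusion below:
 *
 *	if (cam_periph_acquire(periph) != 0)
 *		return (ENXIO);
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		cam_periph_release(periph);
 *		return (error);
 *	}
 *	... sleepable provisioning work ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */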

/*
 * hold/unhold act as mutual exclusion for sections of the code that
 * need to sleep and want to make sure that other sections that
 * will interfere are held off.  This only protects exclusive sections
 * from each other.
 */
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != 0)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}

void
cam_periph_hold_boot(struct cam_periph *periph)
{

	root_mount_hold_token(periph->periph_name, &periph->periph_rootmount);
}

void
cam_periph_release_boot(struct cam_periph *periph)
{

	root_mount_rel(&periph->periph_rootmount);
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, bool wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = false;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't assume that the mere presence of any attributes
		 * for a device means that it is a wired-down entry.
		 * Instead, insist that one of the matching criteria from
		 * camperiphunit be present for the device.
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;

			if (newunit != dunit)
				continue;
			if (resource_string_value(dname, dunit, "sn", &strval) == 0 ||
			    resource_int_value(dname, dunit, "lun", &val) == 0 ||
			    resource_int_value(dname, dunit, "target", &val) == 0 ||
			    resource_string_value(dname, dunit, "at", &strval) == 0)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun, const char *sn)
{
	bool wired = false;
	u_int unit;
	int i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;

	for (wired = false; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = false) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired = true;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired = true;
		}
		if (resource_string_value(dname, dunit, "sn", &strval) == 0) {
			if (sn == NULL || strcmp(strval, sn) != 0)
				continue;
			wired = true;
		}
		if (wired) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
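
/*
 * Illustrative sketch (configuration, not code): the wiring attributes
 * matched above come from loader(8) hints.  Hypothetical entries in
 * /boot/device.hints:
 *
 *	hint.da.4.at="scbus2"
 *	hint.da.4.target="0"
 *	hint.da.4.lun="0"
 *	hint.da.8.sn="ABC12345"
 *
 * would wire unit da4 to the device at scbus2, target 0, lun 0, and
 * unit da8 to the device with serial number "ABC12345".
 */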

void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only tear down the device the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) {
		struct sbuf sb;
		char buffer[160];

		sbuf_new(&sb, buffer, 160, SBUF_FIXEDLEN);
		xpt_denounce_periph_sbuf(periph, &sb);
		sbuf_finish(&sb);
		sbuf_putbuf(&sb);
	}
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;
	struct periph_driver *drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}
	/*
	 * Cache a pointer to the periph_driver structure.  If a
	 * periph_driver is added or removed from the array (see
	 * periphdriver_register()) while we drop the topology lock
	 * below, p_drv may change.  This doesn't protect against this
	 * particular periph_driver going away.  That will require full
	 * reference counting in the periph_driver infrastructure.
	 */
	drv = *p_drv;

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.  We have to
	 * remove the periph from the drv list before we call deferred_ac.
	 * The AC_FOUND_DEVICE callback won't create a new periph if it's
	 * still there.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&drv->units, periph, unit_links);
	drv->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		memset(&ccb, 0, sizeof(ccb));
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			xpt_path_inq(&ccb.cpi, periph->path);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
		    periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to maxphys memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	bzero(mapinfo, sizeof(*mapinfo));
	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > maxphys)
		maxmap = maxphys;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		/* Two mappings: one for cmd->data and one for cmd->data->data */
		data_ptrs[0] = (unsigned char **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (unsigned char **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to maxphys.
		 */
		maxmap = maxphys;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {
		if (lengths[i] > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i]), (u_long)maxmap);
			return (E2BIG);
		}
	}

	/*
	 * This keeps the kernel stack of current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/* Save the user's data address. */
		mapinfo->orig[i] = *data_ptrs[i];

		/*
		 * For small buffers use malloc+copyin/copyout instead of
		 * mapping to KVA to avoid expensive TLB shootdowns.  For
		 * small allocations malloc is backed by UMA, and so much
		 * cheaper on SMP systems.
		 */
		if (lengths[i] <= periph_mapmem_thresh &&
		    ccb->ccb_h.func_code != XPT_MMC_IO) {
			*data_ptrs[i] = malloc(lengths[i], M_CAMPERIPH,
			    M_WAITOK);
			if (dirs[i] != CAM_DIR_IN) {
				if (copyin(mapinfo->orig[i], *data_ptrs[i],
				    lengths[i]) != 0) {
					free(*data_ptrs[i], M_CAMPERIPH);
					*data_ptrs[i] = mapinfo->orig[i];
					goto fail;
				}
			} else
				bzero(*data_ptrs[i], lengths[i]);
			continue;
		}

		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = uma_zalloc(pbuf_zone, M_WAITOK);

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = (dirs[i] == CAM_DIR_OUT) ?
		    BIO_WRITE : BIO_READ;

		/* Map the buffer into kernel memory. */
		if (vmapbuf(mapinfo->bp[i], *data_ptrs[i], lengths[i], 1) < 0) {
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
			goto fail;
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i])
			BUF_KERNPROC(mapinfo->bp[i]);
	}

	mapinfo->num_bufs_used = numbufs;
	return(0);

fail:
	for (i--; i >= 0; i--) {
		if (mapinfo->bp[i]) {
			vunmapbuf(mapinfo->bp[i]);
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else
			free(*data_ptrs[i], M_CAMPERIPH);
		*data_ptrs[i] = mapinfo->orig[i];
	}
	PRELE(curproc);
	return(EACCES);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
int
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int error, numbufs, i;
	uint8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	uint32_t lengths[CAM_PERIPH_MAXMAPS];
	uint32_t dirs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return (0);
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (uint8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (uint8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_MMC_IO:
		data_ptrs[0] = (uint8_t **)&ccb->mmcio.cmd.data;
		lengths[0] = sizeof(struct mmc_data *);
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		data_ptrs[1] = (uint8_t **)&ccb->mmcio.cmd.data->data;
		lengths[1] = ccb->mmcio.cmd.data->len;
		dirs[1] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 2;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
		data_ptrs[0] = &ccb->nvmeio.data_ptr;
		lengths[0] = ccb->nvmeio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_DEV_ADVINFO:
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;
		break;
	default:
		numbufs = 0;
		break;
	}

	error = 0;
	for (i = 0; i < numbufs; i++) {
		if (mapinfo->bp[i]) {
			/* unmap the buffer */
			vunmapbuf(mapinfo->bp[i]);

			/* release the buffer */
			uma_zfree(pbuf_zone, mapinfo->bp[i]);
		} else {
			if (dirs[i] != CAM_DIR_OUT) {
				int error1;

				error1 = copyout(*data_ptrs[i], mapinfo->orig[i],
				    lengths[i]);
				if (error == 0)
					error = error1;
			}
			free(*data_ptrs[i], M_CAMPERIPH);
		}

		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->orig[i];
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);

	return (error);
}
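
/*
 * Illustrative sketch (assumed consumer, not part of this file): a
 * pass(4)-style ioctl handler brackets hardware submission with the
 * two helpers above; "softc", "maxio" and the error routine are
 * hypothetical names:
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxio);
 *	if (error != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	cam_periph_unmapmem(ccb, &mapinfo);	// also copies data back out
 */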

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      uint32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU_0x19:
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {
				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

static void
cam_periph_done_panic(struct cam_periph *periph, union ccb *done_ccb)
{

	panic("%s: already done with ccb %p", __func__, done_ccb);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	xpt_path_assert(done_ccb->ccb_h.path, MA_OWNED);
	done_ccb->ccb_h.cbfcnp = cam_periph_done_panic;
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
		while (ccb->ccb_h.cbfcnp != cam_periph_done_panic)
			xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp,
			    PRIBIO, "cbwait", 0);
	}
	KASSERT(ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX &&
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG,
	    ("%s: proceeding with incomplete ccb: ccb=%p, func_code=%#x, "
	     "status=%#x, index=%d", __func__, ccb, ccb->ccb_h.func_code,
	     ccb->ccb_h.status, ccb->ccb_h.pinfo.index));
}

/*
 * Dispatch a CCB and wait for it to complete.  If the CCB has set a
 * callback function (ccb->ccb_h.cbfcnp), it will be overwritten and lost.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       uint32_t sense_flags),
		  cam_flags camflags, uint32_t sense_flags,
		  struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;
	bool must_poll;
	uint32_t timeout = 1;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);
	KASSERT((ccb->ccb_h.flags & CAM_UNLOCKED) == 0,
	    ("%s: ccb=%p, func_code=%#x, flags=%#x", __func__, ccb,
	     ccb->ccb_h.func_code, ccb->ccb_h.flags));

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if (ds != NULL &&
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO ||
	    ccb->ccb_h.func_code == XPT_NVME_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	/*
	 * We must poll the I/O while we're dumping.  The scheduler is normally
	 * stopped for dumping, except when we call doadump from ddb.  While the
	 * scheduler is running in this case, we still need to poll the I/O to
	 * avoid sleeping waiting for the ccb to complete.
	 *
	 * A panic triggered dump stops the scheduler, any callback from the
	 * shutdown_post_sync event will run with the scheduler stopped, but
	 * before we're officially dumping.  To avoid hanging in adashutdown
	 * initiated commands (or other similar situations), we have to test for
	 * either dumping or SCHEDULER_STOPPED() here.
	 *
	 * To avoid locking problems, dumping/polling callers must call
	 * without a periph lock held.
	 */
	must_poll = dumping || SCHEDULER_STOPPED();
	ccb->ccb_h.cbfcnp = cam_periph_done;

	/*
	 * If we're polling, then we need to ensure that we have ample resources
	 * in the periph.  cam_periph_error can reschedule the ccb by calling
	 * xpt_action and returning ERESTART, so we have to effect the polling
	 * in the do loop below.
	 */
	if (must_poll) {
		if (cam_sim_pollable(ccb->ccb_h.path->bus->sim))
			timeout = xpt_poll_setup(ccb);
		else
			timeout = 0;
	}

	if (timeout == 0) {
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		error = EBUSY;
	} else {
		xpt_action(ccb);
		do {
			if (must_poll) {
				xpt_pollwait(ccb, timeout);
				timeout = ccb->ccb_h.timeout * 10;
			} else {
				cam_periph_ccbwait(ccb);
			}
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
				error = 0;
			else if (error_routine != NULL) {
				/*
				 * cbfcnp is modified by cam_periph_ccbwait so
				 * reset it before we call the error routine
				 * which may call xpt_done.
				 */
				ccb->ccb_h.cbfcnp = cam_periph_done;
				error = (*error_routine)(ccb, camflags, sense_flags);
			} else
				error = 0;
		} while (error == ERESTART);
	}

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		uint32_t bytes;
		devstat_tag_type tag;
		bool valid = true;

		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			bytes = ccb->csio.dxfer_len - ccb->csio.resid;
			tag = (devstat_tag_type)(ccb->csio.tag_action & 0x3);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			bytes = ccb->ataio.dxfer_len - ccb->ataio.resid;
			tag = (devstat_tag_type)0;
		} else if (ccb->ccb_h.func_code == XPT_NVME_IO) {
			bytes = ccb->nvmeio.dxfer_len; /* NB: resid not possible */
			tag = (devstat_tag_type)0;
		} else {
			valid = false;
		}
		if (valid)
			devstat_end_transaction(ds, bytes, tag,
			    ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) ?
			    DEVSTAT_NO_DATA : (ccb->ccb_h.flags & CAM_DIR_OUT) ?
			    DEVSTAT_WRITE : DEVSTAT_READ, NULL, starttime);
	}

	return(error);
}
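
/*
 * Illustrative sketch (assumed consumer, not part of this file): a
 * synchronous command from a periph, with hypothetical "fooerror" and
 * "softc" names:
 *
 *	union ccb *ccb;
 *	int error;
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 4, NULL,
 *	    MSG_SIMPLE_Q_TAG, SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->device_stats);
 *	xpt_release_ccb(ccb);
 */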

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	memset(&ccb_h, 0, sizeof(ccb_h));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

uint32_t
cam_release_devq(struct cam_path *path, uint32_t relsim_flags,
		 uint32_t openings, uint32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	memset(&crs, 0, sizeof(crs));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
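
/*
 * Illustrative sketch (not part of this file's interface): freezing and
 * releasing pair up, so quiescing a device for one second looks like
 *
 *	cam_freeze_devq(periph->path);
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, 1000, 0);
 *
 * which is exactly the pattern cam_periph_freeze_after_event() below
 * uses for the residual settle time.
 */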

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	struct scsi_start_stop_unit *scsi_cmd;
	int error = 0, error_code, sense_key, asc, ascq;
	uint16_t done_flags;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		error = cam_periph_error(done_ccb, 0,
		    SF_RETRY_UA | SF_NO_PRINT);
		if (error == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/* If we tried long wait and still failed, remember that. */
	if ((periph->flags & CAM_PERIPH_RECOVERY_WAIT) &&
	    (done_ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY)) {
		periph->flags &= ~CAM_PERIPH_RECOVERY_WAIT;
		if (error != 0 && done_ccb->ccb_h.retry_count == 0)
			periph->flags |= CAM_PERIPH_RECOVERY_WAIT_FAILED;
	}

	/*
	 * After recovery action(s) completed, return to the original CCB.
	 * If the recovery CCB has failed, considering its own possible
	 * retries and recovery, assume we are back in the state where we
	 * were originally, but with no recovery hopes left.  In such a
	 * case, after the final attempt below, we cancel any further
	 * retries, thereby also blocking any new recovery attempts for
	 * this CCB, and the result will be the final one returned to the
	 * CCB owner.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	KASSERT(saved_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: saved_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, saved_ccb->ccb_h.func_code));
	KASSERT(done_ccb->ccb_h.func_code == XPT_SCSI_IO,
	    ("%s: done_ccb func_code %#x != XPT_SCSI_IO",
	     __func__, done_ccb->ccb_h.func_code));
	saved_ccb->ccb_h.periph_links = done_ccb->ccb_h.periph_links;
	done_flags = done_ccb->ccb_h.alloc_flags;
	bcopy(saved_ccb, done_ccb, sizeof(struct ccb_scsiio));
	done_ccb->ccb_h.alloc_flags = done_flags;
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	if (error != 0)
		done_ccb->ccb_h.retry_count = 0;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, uint32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	memset(&cgds, 0, sizeof(cgds));
	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
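
/*
 * Illustrative sketch (assumed consumer, not part of this file): an
 * async handler can use cam_periph_bus_settle() above to keep commands
 * off the wire while a bus recovers from a reset; "scsi_delay" here
 * stands for the configured settle time (cf. kern.cam.scsi_delay):
 *
 *	case AC_BUS_RESET:
 *		cam_periph_bus_settle(periph, scsi_delay);
 *		break;
 */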
1578 */ 1579 memset(&cgds, 0, sizeof(cgds)); 1580 xpt_setup_ccb(&cgds.ccb_h, 1581 ccb->ccb_h.path, 1582 CAM_PRIORITY_NORMAL); 1583 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1584 xpt_action((union ccb *)&cgds); 1585 1586 /* 1587 * If we were the only transaction active, treat 1588 * the QUEUE FULL as if it were a BUSY condition. 1589 */ 1590 if (cgds.dev_active != 0) { 1591 int total_openings; 1592 1593 /* 1594 * Reduce the number of openings to 1595 * be 1 less than the amount it took 1596 * to get a queue full bounded by the 1597 * minimum allowed tag count for this 1598 * device. 1599 */ 1600 total_openings = cgds.dev_active + cgds.dev_openings; 1601 *openings = cgds.dev_active; 1602 if (*openings < cgds.mintags) 1603 *openings = cgds.mintags; 1604 if (*openings < total_openings) 1605 *relsim_flags = RELSIM_ADJUST_OPENINGS; 1606 else { 1607 /* 1608 * Some devices report queue full for 1609 * temporary resource shortages. For 1610 * this reason, we allow a minimum 1611 * tag count to be entered via a 1612 * quirk entry to prevent the queue 1613 * count on these devices from falling 1614 * to a pessimisticly low value. We 1615 * still wait for the next successful 1616 * completion, however, before queueing 1617 * more transactions to the device. 1618 */ 1619 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; 1620 } 1621 *timeout = 0; 1622 error = ERESTART; 1623 *action &= ~SSQ_PRINT_SENSE; 1624 break; 1625 } 1626 /* FALLTHROUGH */ 1627 } 1628 case SCSI_STATUS_BUSY: 1629 /* 1630 * Restart the queue after either another 1631 * command completes or a 1 second timeout. 1632 */ 1633 periph = xpt_path_periph(ccb->ccb_h.path); 1634 if (periph->flags & CAM_PERIPH_INVALID) { 1635 error = ENXIO; 1636 *action_string = "Periph was invalidated"; 1637 } else if ((sense_flags & SF_RETRY_BUSY) != 0 || 1638 ccb->ccb_h.retry_count > 0) { 1639 if ((sense_flags & SF_RETRY_BUSY) == 0) 1640 ccb->ccb_h.retry_count--; 1641 error = ERESTART; 1642 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1643 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1644 *timeout = 1000; 1645 } else { 1646 error = EIO; 1647 *action_string = "Retries exhausted"; 1648 } 1649 break; 1650 case SCSI_STATUS_RESERV_CONFLICT: 1651 default: 1652 error = EIO; 1653 break; 1654 } 1655 return (error); 1656 } 1657 1658 static int 1659 camperiphscsisenseerror(union ccb *ccb, union ccb **orig, 1660 cam_flags camflags, uint32_t sense_flags, 1661 int *openings, uint32_t *relsim_flags, 1662 uint32_t *timeout, uint32_t *action, const char **action_string) 1663 { 1664 struct cam_periph *periph; 1665 union ccb *orig_ccb = ccb; 1666 int error, recoveryccb; 1667 uint16_t flags; 1668 1669 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING) 1670 if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL) 1671 biotrack(ccb->csio.bio, __func__); 1672 #endif 1673 1674 periph = xpt_path_periph(ccb->ccb_h.path); 1675 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone); 1676 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) { 1677 /* 1678 * If error recovery is already in progress, don't attempt 1679 * to process this error, but requeue it unconditionally 1680 * and attempt to process it once error recovery has 1681 * completed. This failed command is probably related to 1682 * the error that caused the currently active error recovery 1683 * action so our current recovery efforts should also 1684 * address this command. 
Be aware that the error recovery 1685 * code assumes that only one recovery action is in progress 1686 * on a particular peripheral instance at any given time 1687 * (e.g. only one saved CCB for error recovery) so it is 1688 * imperitive that we don't violate this assumption. 1689 */ 1690 error = ERESTART; 1691 *action &= ~SSQ_PRINT_SENSE; 1692 } else { 1693 scsi_sense_action err_action; 1694 struct ccb_getdev cgd; 1695 1696 /* 1697 * Grab the inquiry data for this device. 1698 */ 1699 memset(&cgd, 0, sizeof(cgd)); 1700 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); 1701 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1702 xpt_action((union ccb *)&cgd); 1703 1704 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data, 1705 sense_flags); 1706 error = err_action & SS_ERRMASK; 1707 1708 /* 1709 * Do not autostart sequential access devices 1710 * to avoid unexpected tape loading. 1711 */ 1712 if ((err_action & SS_MASK) == SS_START && 1713 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) { 1714 *action_string = "Will not autostart a " 1715 "sequential access device"; 1716 goto sense_error_done; 1717 } 1718 1719 /* 1720 * Avoid recovery recursion if recovery action is the same. 1721 */ 1722 if ((err_action & SS_MASK) >= SS_START && recoveryccb) { 1723 if (((err_action & SS_MASK) == SS_START && 1724 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) || 1725 ((err_action & SS_MASK) == SS_TUR && 1726 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) { 1727 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; 1728 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1729 *timeout = 500; 1730 } 1731 } 1732 1733 /* 1734 * If the recovery action will consume a retry, 1735 * make sure we actually have retries available. 1736 */ 1737 if ((err_action & SSQ_DECREMENT_COUNT) != 0) { 1738 if (ccb->ccb_h.retry_count > 0 && 1739 (periph->flags & CAM_PERIPH_INVALID) == 0) 1740 ccb->ccb_h.retry_count--; 1741 else { 1742 *action_string = "Retries exhausted"; 1743 goto sense_error_done; 1744 } 1745 } 1746 1747 if ((err_action & SS_MASK) >= SS_START) { 1748 /* 1749 * Do common portions of commands that 1750 * use recovery CCBs. 1751 */ 1752 orig_ccb = xpt_alloc_ccb_nowait(); 1753 if (orig_ccb == NULL) { 1754 *action_string = "Can't allocate recovery CCB"; 1755 goto sense_error_done; 1756 } 1757 /* 1758 * Clear freeze flag for original request here, as 1759 * this freeze will be dropped as part of ERESTART. 1760 */ 1761 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1762 1763 KASSERT(ccb->ccb_h.func_code == XPT_SCSI_IO, 1764 ("%s: ccb func_code %#x != XPT_SCSI_IO", 1765 __func__, ccb->ccb_h.func_code)); 1766 flags = orig_ccb->ccb_h.alloc_flags; 1767 bcopy(ccb, orig_ccb, sizeof(struct ccb_scsiio)); 1768 orig_ccb->ccb_h.alloc_flags = flags; 1769 } 1770 1771 switch (err_action & SS_MASK) { 1772 case SS_NOP: 1773 *action_string = "No recovery action needed"; 1774 error = 0; 1775 break; 1776 case SS_RETRY: 1777 *action_string = "Retrying command (per sense data)"; 1778 error = ERESTART; 1779 break; 1780 case SS_FAIL: 1781 *action_string = "Unretryable error"; 1782 break; 1783 case SS_START: 1784 { 1785 int le; 1786 1787 /* 1788 * Send a start unit command to the device, and 1789 * then retry the command. 1790 */ 1791 *action_string = "Attempting to start unit"; 1792 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1793 1794 /* 1795 * Check for removable media and set 1796 * load/eject flag appropriately. 
1797 */ 1798 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1799 le = TRUE; 1800 else 1801 le = FALSE; 1802 1803 scsi_start_stop(&ccb->csio, 1804 /*retries*/1, 1805 camperiphdone, 1806 MSG_SIMPLE_Q_TAG, 1807 /*start*/TRUE, 1808 /*load/eject*/le, 1809 /*immediate*/FALSE, 1810 SSD_FULL_SIZE, 1811 /*timeout*/50000); 1812 break; 1813 } 1814 case SS_TUR: 1815 { 1816 /* 1817 * Send a Test Unit Ready to the device. 1818 * If the 'many' flag is set, we send 120 1819 * test unit ready commands, one every half 1820 * second. Otherwise, we just send one TUR. 1821 * We only want to do this if the retry 1822 * count has not been exhausted. 1823 */ 1824 int retries; 1825 1826 if ((err_action & SSQ_MANY) != 0 && (periph->flags & 1827 CAM_PERIPH_RECOVERY_WAIT_FAILED) == 0) { 1828 periph->flags |= CAM_PERIPH_RECOVERY_WAIT; 1829 *action_string = "Polling device for readiness"; 1830 retries = 120; 1831 } else { 1832 *action_string = "Testing device for readiness"; 1833 retries = 1; 1834 } 1835 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1836 scsi_test_unit_ready(&ccb->csio, 1837 retries, 1838 camperiphdone, 1839 MSG_SIMPLE_Q_TAG, 1840 SSD_FULL_SIZE, 1841 /*timeout*/5000); 1842 1843 /* 1844 * Accomplish our 500ms delay by deferring 1845 * the release of our device queue appropriately. 1846 */ 1847 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1848 *timeout = 500; 1849 break; 1850 } 1851 default: 1852 panic("Unhandled error action %x", err_action); 1853 } 1854 1855 if ((err_action & SS_MASK) >= SS_START) { 1856 /* 1857 * Drop the priority, so that the recovery 1858 * CCB is the first to execute. Freeze the queue 1859 * after this command is sent so that we can 1860 * restore the old csio and have it queued in 1861 * the proper order before we release normal 1862 * transactions to the device. 1863 */ 1864 ccb->ccb_h.pinfo.priority--; 1865 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1866 ccb->ccb_h.saved_ccb_ptr = orig_ccb; 1867 error = ERESTART; 1868 *orig = orig_ccb; 1869 } 1870 1871 sense_error_done: 1872 *action = err_action; 1873 } 1874 return (error); 1875 } 1876 1877 /* 1878 * Generic error handler. Peripheral drivers usually filter 1879 * out the errors that they handle in a unique manner, then 1880 * call this function. 
1881 */ 1882 int 1883 cam_periph_error(union ccb *ccb, cam_flags camflags, 1884 uint32_t sense_flags) 1885 { 1886 struct cam_path *newpath; 1887 union ccb *orig_ccb, *scan_ccb; 1888 struct cam_periph *periph; 1889 const char *action_string; 1890 cam_status status; 1891 int frozen, error, openings, devctl_err; 1892 uint32_t action, relsim_flags, timeout; 1893 1894 action = SSQ_PRINT_SENSE; 1895 periph = xpt_path_periph(ccb->ccb_h.path); 1896 action_string = NULL; 1897 status = ccb->ccb_h.status; 1898 frozen = (status & CAM_DEV_QFRZN) != 0; 1899 status &= CAM_STATUS_MASK; 1900 devctl_err = openings = relsim_flags = timeout = 0; 1901 orig_ccb = ccb; 1902 1903 /* Filter the errors that should be reported via devctl */ 1904 switch (ccb->ccb_h.status & CAM_STATUS_MASK) { 1905 case CAM_CMD_TIMEOUT: 1906 case CAM_REQ_ABORTED: 1907 case CAM_REQ_CMP_ERR: 1908 case CAM_REQ_TERMIO: 1909 case CAM_UNREC_HBA_ERROR: 1910 case CAM_DATA_RUN_ERR: 1911 case CAM_SCSI_STATUS_ERROR: 1912 case CAM_ATA_STATUS_ERROR: 1913 case CAM_SMP_STATUS_ERROR: 1914 case CAM_DEV_NOT_THERE: 1915 case CAM_NVME_STATUS_ERROR: 1916 devctl_err++; 1917 break; 1918 default: 1919 break; 1920 } 1921 1922 switch (status) { 1923 case CAM_REQ_CMP: 1924 error = 0; 1925 action &= ~SSQ_PRINT_SENSE; 1926 break; 1927 case CAM_SCSI_STATUS_ERROR: 1928 error = camperiphscsistatuserror(ccb, &orig_ccb, 1929 camflags, sense_flags, &openings, &relsim_flags, 1930 &timeout, &action, &action_string); 1931 break; 1932 case CAM_AUTOSENSE_FAIL: 1933 error = EIO; /* we have to kill the command */ 1934 break; 1935 case CAM_UA_ABORT: 1936 case CAM_UA_TERMIO: 1937 case CAM_MSG_REJECT_REC: 1938 /* XXX Don't know that these are correct */ 1939 error = EIO; 1940 break; 1941 case CAM_SEL_TIMEOUT: 1942 if ((camflags & CAM_RETRY_SELTO) != 0) { 1943 if (ccb->ccb_h.retry_count > 0 && 1944 (periph->flags & CAM_PERIPH_INVALID) == 0) { 1945 ccb->ccb_h.retry_count--; 1946 error = ERESTART; 1947 1948 /* 1949 * Wait a bit to give the device 1950 * time to recover before we try again. 1951 */ 1952 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1953 timeout = periph_selto_delay; 1954 break; 1955 } 1956 action_string = "Retries exhausted"; 1957 } 1958 /* FALLTHROUGH */ 1959 case CAM_DEV_NOT_THERE: 1960 error = ENXIO; 1961 action = SSQ_LOST; 1962 break; 1963 case CAM_REQ_INVALID: 1964 case CAM_PATH_INVALID: 1965 case CAM_NO_HBA: 1966 case CAM_PROVIDE_FAIL: 1967 case CAM_REQ_TOO_BIG: 1968 case CAM_LUN_INVALID: 1969 case CAM_TID_INVALID: 1970 case CAM_FUNC_NOTAVAIL: 1971 error = EINVAL; 1972 break; 1973 case CAM_SCSI_BUS_RESET: 1974 case CAM_BDR_SENT: 1975 /* 1976 * Commands that repeatedly timeout and cause these 1977 * kinds of error recovery actions, should return 1978 * CAM_CMD_TIMEOUT, which allows us to safely assume 1979 * that this command was an innocent bystander to 1980 * these events and should be unconditionally 1981 * retried. 1982 */ 1983 case CAM_REQUEUE_REQ: 1984 /* Unconditional requeue if device is still there */ 1985 if (periph->flags & CAM_PERIPH_INVALID) { 1986 action_string = "Periph was invalidated"; 1987 error = ENXIO; 1988 } else if (sense_flags & SF_NO_RETRY) { 1989 error = EIO; 1990 action_string = "Retry was blocked"; 1991 } else { 1992 error = ERESTART; 1993 action &= ~SSQ_PRINT_SENSE; 1994 } 1995 break; 1996 case CAM_RESRC_UNAVAIL: 1997 /* Wait a bit for the resource shortage to abate. */ 1998 timeout = periph_noresrc_delay; 1999 /* FALLTHROUGH */ 2000 case CAM_BUSY: 2001 if (timeout == 0) { 2002 /* Wait a bit for the busy condition to abate. 
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_NVME_STATUS_ERROR:
	case CAM_SMP_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = ENXIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else {
			xpt_print(ccb->ccb_h.path,
			    "Retrying command, %d more tries remain\n",
			    ccb->ccb_h.retry_count);
		}
	}

	if (devctl_err && (error != 0 || (action & SSQ_PRINT_SENSE) != 0))
		cam_periph_devctl_notify(orig_ccb);

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
		    xpt_path_path_id(ccb->ccb_h.path),
		    xpt_path_target_id(ccb->ccb_h.path),
		    lun_id) == CAM_REQ_CMP) {
			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}
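	/*
	 * Note: the AC_LOST_DEVICE async above (and the AC_UNIT_ATTENTION
	 * broadcast below) reaches peripheral drivers through the async
	 * callbacks they registered with xpt_register_async(); for
	 * AC_LOST_DEVICE the common cam_periph_async() handler responds
	 * by invalidating the periph.
	 */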
	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
		    xpt_path_path_id(ccb->ccb_h.path),
		    xpt_path_target_id(ccb->ccb_h.path),
		    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}

#define CAM_PERIPH_DEVD_MSG_SIZE	256
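/*
 * Example (illustrative): cam_periph_devctl_notify() below emits
 * key=value notifications through devctl_notify(9) with system "CAM",
 * subsystem "periph" and a type of "error" or "timeout".  A devd.conf(5)
 * rule consuming them might look roughly like this; devd(8) exposes the
 * key=value pairs (device=..., serial=..., cam_status=...) as variables
 * in the action:
 *
 *	notify 10 {
 *		match "system"		"CAM";
 *		match "subsystem"	"periph";
 *		match "type"		"error";
 *		action "logger CAM error on $device, status $cam_status";
 *	};
 */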
static void
cam_periph_devctl_notify(union ccb *ccb)
{
	struct cam_periph *periph;
	struct ccb_getdev *cgd;
	struct sbuf sb;
	int serr, sk, asc, ascq;
	char *sbmsg, *type;

	sbmsg = malloc(CAM_PERIPH_DEVD_MSG_SIZE, M_CAMPERIPH, M_NOWAIT);
	if (sbmsg == NULL)
		return;

	sbuf_new(&sb, sbmsg, CAM_PERIPH_DEVD_MSG_SIZE, SBUF_FIXEDLEN);

	periph = xpt_path_periph(ccb->ccb_h.path);
	sbuf_printf(&sb, "device=%s%d ", periph->periph_name,
	    periph->unit_number);

	sbuf_cat(&sb, "serial=\"");
	if ((cgd = (struct ccb_getdev *)xpt_alloc_ccb_nowait()) != NULL) {
		xpt_setup_ccb(&cgd->ccb_h, ccb->ccb_h.path,
		    CAM_PRIORITY_NORMAL);
		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)cgd);

		if (cgd->ccb_h.status == CAM_REQ_CMP)
			sbuf_bcat(&sb, cgd->serial_num, cgd->serial_num_len);
		xpt_free_ccb((union ccb *)cgd);
	}
	sbuf_cat(&sb, "\" ");
	sbuf_printf(&sb, "cam_status=\"0x%x\" ", ccb->ccb_h.status);

	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		sbuf_printf(&sb, "timeout=%d ", ccb->ccb_h.timeout);
		type = "timeout";
		break;
	case CAM_SCSI_STATUS_ERROR:
		sbuf_printf(&sb, "scsi_status=%d ", ccb->csio.scsi_status);
		if (scsi_extract_sense_ccb(ccb, &serr, &sk, &asc, &ascq))
			sbuf_printf(&sb, "scsi_sense=\"%02x %02x %02x %02x\" ",
			    serr, sk, asc, ascq);
		type = "error";
		break;
	case CAM_ATA_STATUS_ERROR:
		sbuf_cat(&sb, "RES=\"");
		ata_res_sbuf(&ccb->ataio.res, &sb);
		sbuf_cat(&sb, "\" ");
		type = "error";
		break;
	case CAM_NVME_STATUS_ERROR:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;

		sbuf_printf(&sb, "sc=\"%02x\" sct=\"%02x\" cdw0=\"%08x\" ",
		    NVME_STATUS_GET_SC(n->cpl.status),
		    NVME_STATUS_GET_SCT(n->cpl.status), n->cpl.cdw0);
		type = "error";
		break;
	}
	default:
		type = "error";
		break;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
		sbuf_cat(&sb, "CDB=\"");
		scsi_cdb_sbuf(scsiio_cdb_ptr(&ccb->csio), &sb);
		sbuf_cat(&sb, "\" ");
		break;
	case XPT_ATA_IO:
		sbuf_cat(&sb, "ACB=\"");
		ata_cmd_sbuf(&ccb->ataio.cmd, &sb);
		sbuf_cat(&sb, "\" ");
		break;
	case XPT_NVME_IO:
	case XPT_NVME_ADMIN:
	{
		struct ccb_nvmeio *n = &ccb->nvmeio;
		struct nvme_command *cmd = &n->cmd;

		// XXX Likely should be nvme_cmd_sbuf
		sbuf_printf(&sb, "opc=\"%02x\" fuse=\"%02x\" cid=\"%04x\" "
		    "nsid=\"%08x\" cdw10=\"%08x\" cdw11=\"%08x\" "
		    "cdw12=\"%08x\" cdw13=\"%08x\" cdw14=\"%08x\" "
		    "cdw15=\"%08x\" ",
		    cmd->opc, cmd->fuse, cmd->cid, cmd->nsid, cmd->cdw10,
		    cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
		    cmd->cdw15);
		break;
	}
	default:
		break;
	}

	if (sbuf_finish(&sb) == 0)
		devctl_notify("CAM", "periph", type, sbuf_data(&sb));
	sbuf_delete(&sb);
	free(sbmsg, M_CAMPERIPH);
}

/*
 * Sysctl to force an invalidation of the drive right now.  Can be
 * called with CTLFLAG_MPSAFE since we take the periph lock.
 */
int
cam_periph_invalidate_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct cam_periph *periph;
	int error, value;

	periph = arg1;
	value = 0;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL || value != 1)
		return (error);

	cam_periph_lock(periph);
	cam_periph_invalidate(periph);
	cam_periph_unlock(periph);

	return (0);
}
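/*
 * Example (hypothetical): a periph driver would typically export the
 * handler above as a per-device sysctl from its own sysctl context, so
 * that writing 1 to the node invalidates the device.  The softc field
 * names here are invented for the sketch:
 *
 *	SYSCTL_ADD_PROC(&softc->sysctl_ctx,
 *	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "invalidate",
 *	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, periph, 0,
 *	    cam_periph_invalidate_sysctl, "I",
 *	    "Write 1 to invalidate the drive immediately");
 */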