1 /*- 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/types.h> 36 #include <sys/malloc.h> 37 #include <sys/kernel.h> 38 #include <sys/bio.h> 39 #include <sys/lock.h> 40 #include <sys/mutex.h> 41 #include <sys/buf.h> 42 #include <sys/proc.h> 43 #include <sys/devicestat.h> 44 #include <sys/bus.h> 45 #include <sys/sbuf.h> 46 #include <vm/vm.h> 47 #include <vm/vm_extern.h> 48 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include <cam/cam_queue.h> 52 #include <cam/cam_xpt_periph.h> 53 #include <cam/cam_periph.h> 54 #include <cam/cam_debug.h> 55 #include <cam/cam_sim.h> 56 57 #include <cam/scsi/scsi_all.h> 58 #include <cam/scsi/scsi_message.h> 59 #include <cam/scsi/scsi_pass.h> 60 61 static u_int camperiphnextunit(struct periph_driver *p_drv, 62 u_int newunit, int wired, 63 path_id_t pathid, target_id_t target, 64 lun_id_t lun); 65 static u_int camperiphunit(struct periph_driver *p_drv, 66 path_id_t pathid, target_id_t target, 67 lun_id_t lun); 68 static void camperiphdone(struct cam_periph *periph, 69 union ccb *done_ccb); 70 static void camperiphfree(struct cam_periph *periph); 71 static int camperiphscsistatuserror(union ccb *ccb, 72 union ccb **orig_ccb, 73 cam_flags camflags, 74 u_int32_t sense_flags, 75 int *openings, 76 u_int32_t *relsim_flags, 77 u_int32_t *timeout, 78 u_int32_t *action, 79 const char **action_string); 80 static int camperiphscsisenseerror(union ccb *ccb, 81 union ccb **orig_ccb, 82 cam_flags camflags, 83 u_int32_t sense_flags, 84 int *openings, 85 u_int32_t *relsim_flags, 86 u_int32_t *timeout, 87 u_int32_t *action, 88 const char **action_string); 89 90 static int nperiph_drivers; 91 static int initialized = 0; 92 struct periph_driver **periph_drivers; 93 94 static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); 95 96 static int periph_selto_delay = 1000; 97 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); 98 static int periph_noresrc_delay = 500; 99 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); 100 static int periph_busy_delay = 500; 101 TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); 102 103 104 void 105 periphdriver_register(void *data) 106 { 107 struct periph_driver *drv = (struct periph_driver *)data; 108 struct periph_driver **newdrivers, **old; 109 int ndrivers; 110 111 again: 112 ndrivers = nperiph_drivers + 2; 113 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, 114 M_WAITOK); 115 xpt_lock_buses(); 116 if (ndrivers != nperiph_drivers + 2) { 117 /* 118 * Lost race against itself; go around. 119 */ 120 xpt_unlock_buses(); 121 free(newdrivers, M_CAMPERIPH); 122 goto again; 123 } 124 if (periph_drivers) 125 bcopy(periph_drivers, newdrivers, 126 sizeof(*newdrivers) * nperiph_drivers); 127 newdrivers[nperiph_drivers] = drv; 128 newdrivers[nperiph_drivers + 1] = NULL; 129 old = periph_drivers; 130 periph_drivers = newdrivers; 131 nperiph_drivers++; 132 xpt_unlock_buses(); 133 if (old) 134 free(old, M_CAMPERIPH); 135 /* If driver marked as early or it is late now, initialize it. */ 136 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || 137 initialized > 1) 138 (*drv->init)(); 139 } 140 141 void 142 periphdriver_init(int level) 143 { 144 int i, early; 145 146 initialized = max(initialized, level); 147 for (i = 0; periph_drivers[i] != NULL; i++) { 148 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2; 149 if (early == initialized) 150 (*periph_drivers[i]->init)(); 151 } 152 } 153 154 cam_status 155 cam_periph_alloc(periph_ctor_t *periph_ctor, 156 periph_oninv_t *periph_oninvalidate, 157 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 158 char *name, cam_periph_type type, struct cam_path *path, 159 ac_callback_t *ac_callback, ac_code code, void *arg) 160 { 161 struct periph_driver **p_drv; 162 struct cam_sim *sim; 163 struct cam_periph *periph; 164 struct cam_periph *cur_periph; 165 path_id_t path_id; 166 target_id_t target_id; 167 lun_id_t lun_id; 168 cam_status status; 169 u_int init_level; 170 171 init_level = 0; 172 /* 173 * Handle Hot-Plug scenarios. If there is already a peripheral 174 * of our type assigned to this path, we are likely waiting for 175 * final close on an old, invalidated, peripheral. If this is 176 * the case, queue up a deferred call to the peripheral's async 177 * handler. If it looks like a mistaken re-allocation, complain. 178 */ 179 if ((periph = cam_periph_find(path, name)) != NULL) { 180 181 if ((periph->flags & CAM_PERIPH_INVALID) != 0 182 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { 183 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; 184 periph->deferred_callback = ac_callback; 185 periph->deferred_ac = code; 186 return (CAM_REQ_INPROG); 187 } else { 188 printf("cam_periph_alloc: attempt to re-allocate " 189 "valid device %s%d rejected flags %#x " 190 "refcount %d\n", periph->periph_name, 191 periph->unit_number, periph->flags, 192 periph->refcount); 193 } 194 return (CAM_REQ_INVALID); 195 } 196 197 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH, 198 M_NOWAIT|M_ZERO); 199 200 if (periph == NULL) 201 return (CAM_RESRC_UNAVAIL); 202 203 init_level++; 204 205 206 sim = xpt_path_sim(path); 207 path_id = xpt_path_path_id(path); 208 target_id = xpt_path_target_id(path); 209 lun_id = xpt_path_lun_id(path); 210 periph->periph_start = periph_start; 211 periph->periph_dtor = periph_dtor; 212 periph->periph_oninval = periph_oninvalidate; 213 periph->type = type; 214 periph->periph_name = name; 215 periph->scheduled_priority = CAM_PRIORITY_NONE; 216 periph->immediate_priority = CAM_PRIORITY_NONE; 217 periph->refcount = 1; /* Dropped by invalidation. */ 218 periph->sim = sim; 219 SLIST_INIT(&periph->ccb_list); 220 status = xpt_create_path(&path, periph, path_id, target_id, lun_id); 221 if (status != CAM_REQ_CMP) 222 goto failure; 223 periph->path = path; 224 225 xpt_lock_buses(); 226 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 227 if (strcmp((*p_drv)->driver_name, name) == 0) 228 break; 229 } 230 if (*p_drv == NULL) { 231 printf("cam_periph_alloc: invalid periph name '%s'\n", name); 232 xpt_unlock_buses(); 233 xpt_free_path(periph->path); 234 free(periph, M_CAMPERIPH); 235 return (CAM_REQ_INVALID); 236 } 237 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); 238 cur_periph = TAILQ_FIRST(&(*p_drv)->units); 239 while (cur_periph != NULL 240 && cur_periph->unit_number < periph->unit_number) 241 cur_periph = TAILQ_NEXT(cur_periph, unit_links); 242 if (cur_periph != NULL) { 243 KASSERT(cur_periph->unit_number != periph->unit_number, ("duplicate units on periph list")); 244 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); 245 } else { 246 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); 247 (*p_drv)->generation++; 248 } 249 xpt_unlock_buses(); 250 251 init_level++; 252 253 status = xpt_add_periph(periph); 254 if (status != CAM_REQ_CMP) 255 goto failure; 256 257 init_level++; 258 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n")); 259 260 status = periph_ctor(periph, arg); 261 262 if (status == CAM_REQ_CMP) 263 init_level++; 264 265 failure: 266 switch (init_level) { 267 case 4: 268 /* Initialized successfully */ 269 break; 270 case 3: 271 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); 272 xpt_remove_periph(periph); 273 /* FALLTHROUGH */ 274 case 2: 275 xpt_lock_buses(); 276 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 277 xpt_unlock_buses(); 278 xpt_free_path(periph->path); 279 /* FALLTHROUGH */ 280 case 1: 281 free(periph, M_CAMPERIPH); 282 /* FALLTHROUGH */ 283 case 0: 284 /* No cleanup to perform. */ 285 break; 286 default: 287 panic("%s: Unknown init level", __func__); 288 } 289 return(status); 290 } 291 292 /* 293 * Find a peripheral structure with the specified path, target, lun, 294 * and (optionally) type. If the name is NULL, this function will return 295 * the first peripheral driver that matches the specified path. 296 */ 297 struct cam_periph * 298 cam_periph_find(struct cam_path *path, char *name) 299 { 300 struct periph_driver **p_drv; 301 struct cam_periph *periph; 302 303 xpt_lock_buses(); 304 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 305 306 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) 307 continue; 308 309 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { 310 if (xpt_path_comp(periph->path, path) == 0) { 311 xpt_unlock_buses(); 312 cam_periph_assert(periph, MA_OWNED); 313 return(periph); 314 } 315 } 316 if (name != NULL) { 317 xpt_unlock_buses(); 318 return(NULL); 319 } 320 } 321 xpt_unlock_buses(); 322 return(NULL); 323 } 324 325 /* 326 * Find peripheral driver instances attached to the specified path. 327 */ 328 int 329 cam_periph_list(struct cam_path *path, struct sbuf *sb) 330 { 331 struct sbuf local_sb; 332 struct periph_driver **p_drv; 333 struct cam_periph *periph; 334 int count; 335 int sbuf_alloc_len; 336 337 sbuf_alloc_len = 16; 338 retry: 339 sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN); 340 count = 0; 341 xpt_lock_buses(); 342 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 343 344 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { 345 if (xpt_path_comp(periph->path, path) != 0) 346 continue; 347 348 if (sbuf_len(&local_sb) != 0) 349 sbuf_cat(&local_sb, ","); 350 351 sbuf_printf(&local_sb, "%s%d", periph->periph_name, 352 periph->unit_number); 353 354 if (sbuf_error(&local_sb) == ENOMEM) { 355 sbuf_alloc_len *= 2; 356 xpt_unlock_buses(); 357 sbuf_delete(&local_sb); 358 goto retry; 359 } 360 count++; 361 } 362 } 363 xpt_unlock_buses(); 364 sbuf_finish(&local_sb); 365 sbuf_cpy(sb, sbuf_data(&local_sb)); 366 sbuf_delete(&local_sb); 367 return (count); 368 } 369 370 cam_status 371 cam_periph_acquire(struct cam_periph *periph) 372 { 373 cam_status status; 374 375 status = CAM_REQ_CMP_ERR; 376 if (periph == NULL) 377 return (status); 378 379 xpt_lock_buses(); 380 if ((periph->flags & CAM_PERIPH_INVALID) == 0) { 381 periph->refcount++; 382 status = CAM_REQ_CMP; 383 } 384 xpt_unlock_buses(); 385 386 return (status); 387 } 388 389 void 390 cam_periph_doacquire(struct cam_periph *periph) 391 { 392 393 xpt_lock_buses(); 394 KASSERT(periph->refcount >= 1, 395 ("cam_periph_doacquire() with refcount == %d", periph->refcount)); 396 periph->refcount++; 397 xpt_unlock_buses(); 398 } 399 400 void 401 cam_periph_release_locked_buses(struct cam_periph *periph) 402 { 403 404 cam_periph_assert(periph, MA_OWNED); 405 KASSERT(periph->refcount >= 1, ("periph->refcount >= 1")); 406 if (--periph->refcount == 0) 407 camperiphfree(periph); 408 } 409 410 void 411 cam_periph_release_locked(struct cam_periph *periph) 412 { 413 414 if (periph == NULL) 415 return; 416 417 xpt_lock_buses(); 418 cam_periph_release_locked_buses(periph); 419 xpt_unlock_buses(); 420 } 421 422 void 423 cam_periph_release(struct cam_periph *periph) 424 { 425 struct mtx *mtx; 426 427 if (periph == NULL) 428 return; 429 430 cam_periph_assert(periph, MA_NOTOWNED); 431 mtx = cam_periph_mtx(periph); 432 mtx_lock(mtx); 433 cam_periph_release_locked(periph); 434 mtx_unlock(mtx); 435 } 436 437 int 438 cam_periph_hold(struct cam_periph *periph, int priority) 439 { 440 int error; 441 442 /* 443 * Increment the reference count on the peripheral 444 * while we wait for our lock attempt to succeed 445 * to ensure the peripheral doesn't disappear out 446 * from user us while we sleep. 447 */ 448 449 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 450 return (ENXIO); 451 452 cam_periph_assert(periph, MA_OWNED); 453 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { 454 periph->flags |= CAM_PERIPH_LOCK_WANTED; 455 if ((error = cam_periph_sleep(periph, periph, priority, 456 "caplck", 0)) != 0) { 457 cam_periph_release_locked(periph); 458 return (error); 459 } 460 if (periph->flags & CAM_PERIPH_INVALID) { 461 cam_periph_release_locked(periph); 462 return (ENXIO); 463 } 464 } 465 466 periph->flags |= CAM_PERIPH_LOCKED; 467 return (0); 468 } 469 470 void 471 cam_periph_unhold(struct cam_periph *periph) 472 { 473 474 cam_periph_assert(periph, MA_OWNED); 475 476 periph->flags &= ~CAM_PERIPH_LOCKED; 477 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { 478 periph->flags &= ~CAM_PERIPH_LOCK_WANTED; 479 wakeup(periph); 480 } 481 482 cam_periph_release_locked(periph); 483 } 484 485 /* 486 * Look for the next unit number that is not currently in use for this 487 * peripheral type starting at "newunit". Also exclude unit numbers that 488 * are reserved by for future "hardwiring" unless we already know that this 489 * is a potential wired device. Only assume that the device is "wired" the 490 * first time through the loop since after that we'll be looking at unit 491 * numbers that did not match a wiring entry. 492 */ 493 static u_int 494 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired, 495 path_id_t pathid, target_id_t target, lun_id_t lun) 496 { 497 struct cam_periph *periph; 498 char *periph_name; 499 int i, val, dunit, r; 500 const char *dname, *strval; 501 502 periph_name = p_drv->driver_name; 503 for (;;newunit++) { 504 505 for (periph = TAILQ_FIRST(&p_drv->units); 506 periph != NULL && periph->unit_number != newunit; 507 periph = TAILQ_NEXT(periph, unit_links)) 508 ; 509 510 if (periph != NULL && periph->unit_number == newunit) { 511 if (wired != 0) { 512 xpt_print(periph->path, "Duplicate Wired " 513 "Device entry!\n"); 514 xpt_print(periph->path, "Second device (%s " 515 "device at scbus%d target %d lun %d) will " 516 "not be wired\n", periph_name, pathid, 517 target, lun); 518 wired = 0; 519 } 520 continue; 521 } 522 if (wired) 523 break; 524 525 /* 526 * Don't match entries like "da 4" as a wired down 527 * device, but do match entries like "da 4 target 5" 528 * or even "da 4 scbus 1". 529 */ 530 i = 0; 531 dname = periph_name; 532 for (;;) { 533 r = resource_find_dev(&i, dname, &dunit, NULL, NULL); 534 if (r != 0) 535 break; 536 /* if no "target" and no specific scbus, skip */ 537 if (resource_int_value(dname, dunit, "target", &val) && 538 (resource_string_value(dname, dunit, "at",&strval)|| 539 strcmp(strval, "scbus") == 0)) 540 continue; 541 if (newunit == dunit) 542 break; 543 } 544 if (r != 0) 545 break; 546 } 547 return (newunit); 548 } 549 550 static u_int 551 camperiphunit(struct periph_driver *p_drv, path_id_t pathid, 552 target_id_t target, lun_id_t lun) 553 { 554 u_int unit; 555 int wired, i, val, dunit; 556 const char *dname, *strval; 557 char pathbuf[32], *periph_name; 558 559 periph_name = p_drv->driver_name; 560 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); 561 unit = 0; 562 i = 0; 563 dname = periph_name; 564 for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0; 565 wired = 0) { 566 if (resource_string_value(dname, dunit, "at", &strval) == 0) { 567 if (strcmp(strval, pathbuf) != 0) 568 continue; 569 wired++; 570 } 571 if (resource_int_value(dname, dunit, "target", &val) == 0) { 572 if (val != target) 573 continue; 574 wired++; 575 } 576 if (resource_int_value(dname, dunit, "lun", &val) == 0) { 577 if (val != lun) 578 continue; 579 wired++; 580 } 581 if (wired != 0) { 582 unit = dunit; 583 break; 584 } 585 } 586 587 /* 588 * Either start from 0 looking for the next unit or from 589 * the unit number given in the resource config. This way, 590 * if we have wildcard matches, we don't return the same 591 * unit number twice. 592 */ 593 unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun); 594 595 return (unit); 596 } 597 598 void 599 cam_periph_invalidate(struct cam_periph *periph) 600 { 601 602 cam_periph_assert(periph, MA_OWNED); 603 /* 604 * We only call this routine the first time a peripheral is 605 * invalidated. 606 */ 607 if ((periph->flags & CAM_PERIPH_INVALID) != 0) 608 return; 609 610 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n")); 611 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) 612 xpt_denounce_periph(periph); 613 periph->flags |= CAM_PERIPH_INVALID; 614 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; 615 if (periph->periph_oninval != NULL) 616 periph->periph_oninval(periph); 617 cam_periph_release_locked(periph); 618 } 619 620 static void 621 camperiphfree(struct cam_periph *periph) 622 { 623 struct periph_driver **p_drv; 624 625 cam_periph_assert(periph, MA_OWNED); 626 KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating", 627 periph->periph_name, periph->unit_number)); 628 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 629 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) 630 break; 631 } 632 if (*p_drv == NULL) { 633 printf("camperiphfree: attempt to free non-existant periph\n"); 634 return; 635 } 636 637 /* 638 * We need to set this flag before dropping the topology lock, to 639 * let anyone who is traversing the list that this peripheral is 640 * about to be freed, and there will be no more reference count 641 * checks. 642 */ 643 periph->flags |= CAM_PERIPH_FREE; 644 645 /* 646 * The peripheral destructor semantics dictate calling with only the 647 * SIM mutex held. Since it might sleep, it should not be called 648 * with the topology lock held. 649 */ 650 xpt_unlock_buses(); 651 652 /* 653 * We need to call the peripheral destructor prior to removing the 654 * peripheral from the list. Otherwise, we risk running into a 655 * scenario where the peripheral unit number may get reused 656 * (because it has been removed from the list), but some resources 657 * used by the peripheral are still hanging around. In particular, 658 * the devfs nodes used by some peripherals like the pass(4) driver 659 * aren't fully cleaned up until the destructor is run. If the 660 * unit number is reused before the devfs instance is fully gone, 661 * devfs will panic. 662 */ 663 if (periph->periph_dtor != NULL) 664 periph->periph_dtor(periph); 665 666 /* 667 * The peripheral list is protected by the topology lock. 668 */ 669 xpt_lock_buses(); 670 671 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 672 (*p_drv)->generation++; 673 674 xpt_remove_periph(periph); 675 676 xpt_unlock_buses(); 677 if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting) 678 xpt_print(periph->path, "Periph destroyed\n"); 679 else 680 CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n")); 681 682 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { 683 union ccb ccb; 684 void *arg; 685 686 switch (periph->deferred_ac) { 687 case AC_FOUND_DEVICE: 688 ccb.ccb_h.func_code = XPT_GDEV_TYPE; 689 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 690 xpt_action(&ccb); 691 arg = &ccb; 692 break; 693 case AC_PATH_REGISTERED: 694 ccb.ccb_h.func_code = XPT_PATH_INQ; 695 xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 696 xpt_action(&ccb); 697 arg = &ccb; 698 break; 699 default: 700 arg = NULL; 701 break; 702 } 703 periph->deferred_callback(NULL, periph->deferred_ac, 704 periph->path, arg); 705 } 706 xpt_free_path(periph->path); 707 free(periph, M_CAMPERIPH); 708 xpt_lock_buses(); 709 } 710 711 /* 712 * Map user virtual pointers into kernel virtual address space, so we can 713 * access the memory. This is now a generic function that centralizes most 714 * of the sanity checks on the data flags, if any. 715 * This also only works for up to MAXPHYS memory. Since we use 716 * buffers to map stuff in and out, we're limited to the buffer size. 717 */ 718 int 719 cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) 720 { 721 int numbufs, i, j; 722 int flags[CAM_PERIPH_MAXMAPS]; 723 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 724 u_int32_t lengths[CAM_PERIPH_MAXMAPS]; 725 u_int32_t dirs[CAM_PERIPH_MAXMAPS]; 726 /* Some controllers may not be able to handle more data. */ 727 size_t maxmap = DFLTPHYS; 728 729 switch(ccb->ccb_h.func_code) { 730 case XPT_DEV_MATCH: 731 if (ccb->cdm.match_buf_len == 0) { 732 printf("cam_periph_mapmem: invalid match buffer " 733 "length 0\n"); 734 return(EINVAL); 735 } 736 if (ccb->cdm.pattern_buf_len > 0) { 737 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 738 lengths[0] = ccb->cdm.pattern_buf_len; 739 dirs[0] = CAM_DIR_OUT; 740 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 741 lengths[1] = ccb->cdm.match_buf_len; 742 dirs[1] = CAM_DIR_IN; 743 numbufs = 2; 744 } else { 745 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 746 lengths[0] = ccb->cdm.match_buf_len; 747 dirs[0] = CAM_DIR_IN; 748 numbufs = 1; 749 } 750 /* 751 * This request will not go to the hardware, no reason 752 * to be so strict. vmapbuf() is able to map up to MAXPHYS. 753 */ 754 maxmap = MAXPHYS; 755 break; 756 case XPT_SCSI_IO: 757 case XPT_CONT_TARGET_IO: 758 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 759 return(0); 760 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) 761 return (EINVAL); 762 data_ptrs[0] = &ccb->csio.data_ptr; 763 lengths[0] = ccb->csio.dxfer_len; 764 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 765 numbufs = 1; 766 break; 767 case XPT_ATA_IO: 768 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE) 769 return(0); 770 if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR) 771 return (EINVAL); 772 data_ptrs[0] = &ccb->ataio.data_ptr; 773 lengths[0] = ccb->ataio.dxfer_len; 774 dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK; 775 numbufs = 1; 776 break; 777 case XPT_SMP_IO: 778 data_ptrs[0] = &ccb->smpio.smp_request; 779 lengths[0] = ccb->smpio.smp_request_len; 780 dirs[0] = CAM_DIR_OUT; 781 data_ptrs[1] = &ccb->smpio.smp_response; 782 lengths[1] = ccb->smpio.smp_response_len; 783 dirs[1] = CAM_DIR_IN; 784 numbufs = 2; 785 break; 786 case XPT_DEV_ADVINFO: 787 if (ccb->cdai.bufsiz == 0) 788 return (0); 789 790 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; 791 lengths[0] = ccb->cdai.bufsiz; 792 dirs[0] = CAM_DIR_IN; 793 numbufs = 1; 794 795 /* 796 * This request will not go to the hardware, no reason 797 * to be so strict. vmapbuf() is able to map up to MAXPHYS. 798 */ 799 maxmap = MAXPHYS; 800 break; 801 default: 802 return(EINVAL); 803 break; /* NOTREACHED */ 804 } 805 806 /* 807 * Check the transfer length and permissions first, so we don't 808 * have to unmap any previously mapped buffers. 809 */ 810 for (i = 0; i < numbufs; i++) { 811 812 flags[i] = 0; 813 814 /* 815 * The userland data pointer passed in may not be page 816 * aligned. vmapbuf() truncates the address to a page 817 * boundary, so if the address isn't page aligned, we'll 818 * need enough space for the given transfer length, plus 819 * whatever extra space is necessary to make it to the page 820 * boundary. 821 */ 822 if ((lengths[i] + 823 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){ 824 printf("cam_periph_mapmem: attempt to map %lu bytes, " 825 "which is greater than %lu\n", 826 (long)(lengths[i] + 827 (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)), 828 (u_long)maxmap); 829 return(E2BIG); 830 } 831 832 if (dirs[i] & CAM_DIR_OUT) { 833 flags[i] = BIO_WRITE; 834 } 835 836 if (dirs[i] & CAM_DIR_IN) { 837 flags[i] = BIO_READ; 838 } 839 840 } 841 842 /* 843 * This keeps the the kernel stack of current thread from getting 844 * swapped. In low-memory situations where the kernel stack might 845 * otherwise get swapped out, this holds it and allows the thread 846 * to make progress and release the kernel mapped pages sooner. 847 * 848 * XXX KDM should I use P_NOSWAP instead? 849 */ 850 PHOLD(curproc); 851 852 for (i = 0; i < numbufs; i++) { 853 /* 854 * Get the buffer. 855 */ 856 mapinfo->bp[i] = getpbuf(NULL); 857 858 /* put our pointer in the data slot */ 859 mapinfo->bp[i]->b_data = *data_ptrs[i]; 860 861 /* save the user's data address */ 862 mapinfo->bp[i]->b_caller1 = *data_ptrs[i]; 863 864 /* set the transfer length, we know it's < MAXPHYS */ 865 mapinfo->bp[i]->b_bufsize = lengths[i]; 866 867 /* set the direction */ 868 mapinfo->bp[i]->b_iocmd = flags[i]; 869 870 /* 871 * Map the buffer into kernel memory. 872 * 873 * Note that useracc() alone is not a sufficient test. 874 * vmapbuf() can still fail due to a smaller file mapped 875 * into a larger area of VM, or if userland races against 876 * vmapbuf() after the useracc() check. 877 */ 878 if (vmapbuf(mapinfo->bp[i], 1) < 0) { 879 for (j = 0; j < i; ++j) { 880 *data_ptrs[j] = mapinfo->bp[j]->b_caller1; 881 vunmapbuf(mapinfo->bp[j]); 882 relpbuf(mapinfo->bp[j], NULL); 883 } 884 relpbuf(mapinfo->bp[i], NULL); 885 PRELE(curproc); 886 return(EACCES); 887 } 888 889 /* set our pointer to the new mapped area */ 890 *data_ptrs[i] = mapinfo->bp[i]->b_data; 891 892 mapinfo->num_bufs_used++; 893 } 894 895 /* 896 * Now that we've gotten this far, change ownership to the kernel 897 * of the buffers so that we don't run afoul of returning to user 898 * space with locks (on the buffer) held. 899 */ 900 for (i = 0; i < numbufs; i++) { 901 BUF_KERNPROC(mapinfo->bp[i]); 902 } 903 904 905 return(0); 906 } 907 908 /* 909 * Unmap memory segments mapped into kernel virtual address space by 910 * cam_periph_mapmem(). 911 */ 912 void 913 cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo) 914 { 915 int numbufs, i; 916 u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS]; 917 918 if (mapinfo->num_bufs_used <= 0) { 919 /* nothing to free and the process wasn't held. */ 920 return; 921 } 922 923 switch (ccb->ccb_h.func_code) { 924 case XPT_DEV_MATCH: 925 numbufs = min(mapinfo->num_bufs_used, 2); 926 927 if (numbufs == 1) { 928 data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches; 929 } else { 930 data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns; 931 data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches; 932 } 933 break; 934 case XPT_SCSI_IO: 935 case XPT_CONT_TARGET_IO: 936 data_ptrs[0] = &ccb->csio.data_ptr; 937 numbufs = min(mapinfo->num_bufs_used, 1); 938 break; 939 case XPT_ATA_IO: 940 data_ptrs[0] = &ccb->ataio.data_ptr; 941 numbufs = min(mapinfo->num_bufs_used, 1); 942 break; 943 case XPT_SMP_IO: 944 numbufs = min(mapinfo->num_bufs_used, 2); 945 data_ptrs[0] = &ccb->smpio.smp_request; 946 data_ptrs[1] = &ccb->smpio.smp_response; 947 break; 948 case XPT_DEV_ADVINFO: 949 numbufs = min(mapinfo->num_bufs_used, 1); 950 data_ptrs[0] = (uint8_t **)&ccb->cdai.buf; 951 break; 952 default: 953 /* allow ourselves to be swapped once again */ 954 PRELE(curproc); 955 return; 956 break; /* NOTREACHED */ 957 } 958 959 for (i = 0; i < numbufs; i++) { 960 /* Set the user's pointer back to the original value */ 961 *data_ptrs[i] = mapinfo->bp[i]->b_caller1; 962 963 /* unmap the buffer */ 964 vunmapbuf(mapinfo->bp[i]); 965 966 /* release the buffer */ 967 relpbuf(mapinfo->bp[i], NULL); 968 } 969 970 /* allow ourselves to be swapped once again */ 971 PRELE(curproc); 972 } 973 974 void 975 cam_periph_ccbwait(union ccb *ccb) 976 { 977 978 if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX) 979 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)) 980 xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO, 981 "cbwait", 0); 982 } 983 984 int 985 cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr, 986 int (*error_routine)(union ccb *ccb, 987 cam_flags camflags, 988 u_int32_t sense_flags)) 989 { 990 union ccb *ccb; 991 int error; 992 int found; 993 994 error = found = 0; 995 996 switch(cmd){ 997 case CAMGETPASSTHRU: 998 ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL); 999 xpt_setup_ccb(&ccb->ccb_h, 1000 ccb->ccb_h.path, 1001 CAM_PRIORITY_NORMAL); 1002 ccb->ccb_h.func_code = XPT_GDEVLIST; 1003 1004 /* 1005 * Basically, the point of this is that we go through 1006 * getting the list of devices, until we find a passthrough 1007 * device. In the current version of the CAM code, the 1008 * only way to determine what type of device we're dealing 1009 * with is by its name. 1010 */ 1011 while (found == 0) { 1012 ccb->cgdl.index = 0; 1013 ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS; 1014 while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) { 1015 1016 /* we want the next device in the list */ 1017 xpt_action(ccb); 1018 if (strncmp(ccb->cgdl.periph_name, 1019 "pass", 4) == 0){ 1020 found = 1; 1021 break; 1022 } 1023 } 1024 if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) && 1025 (found == 0)) { 1026 ccb->cgdl.periph_name[0] = '\0'; 1027 ccb->cgdl.unit_number = 0; 1028 break; 1029 } 1030 } 1031 1032 /* copy the result back out */ 1033 bcopy(ccb, addr, sizeof(union ccb)); 1034 1035 /* and release the ccb */ 1036 xpt_release_ccb(ccb); 1037 1038 break; 1039 default: 1040 error = ENOTTY; 1041 break; 1042 } 1043 return(error); 1044 } 1045 1046 static void 1047 cam_periph_done(struct cam_periph *periph, union ccb *done_ccb) 1048 { 1049 1050 /* Caller will release the CCB */ 1051 wakeup(&done_ccb->ccb_h.cbfcnp); 1052 } 1053 1054 int 1055 cam_periph_runccb(union ccb *ccb, 1056 int (*error_routine)(union ccb *ccb, 1057 cam_flags camflags, 1058 u_int32_t sense_flags), 1059 cam_flags camflags, u_int32_t sense_flags, 1060 struct devstat *ds) 1061 { 1062 struct bintime *starttime; 1063 struct bintime ltime; 1064 int error; 1065 1066 starttime = NULL; 1067 xpt_path_assert(ccb->ccb_h.path, MA_OWNED); 1068 1069 /* 1070 * If the user has supplied a stats structure, and if we understand 1071 * this particular type of ccb, record the transaction start. 1072 */ 1073 if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO || 1074 ccb->ccb_h.func_code == XPT_ATA_IO)) { 1075 starttime = <ime; 1076 binuptime(starttime); 1077 devstat_start_transaction(ds, starttime); 1078 } 1079 1080 ccb->ccb_h.cbfcnp = cam_periph_done; 1081 xpt_action(ccb); 1082 1083 do { 1084 cam_periph_ccbwait(ccb); 1085 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 1086 error = 0; 1087 else if (error_routine != NULL) 1088 error = (*error_routine)(ccb, camflags, sense_flags); 1089 else 1090 error = 0; 1091 1092 } while (error == ERESTART); 1093 1094 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1095 cam_release_devq(ccb->ccb_h.path, 1096 /* relsim_flags */0, 1097 /* openings */0, 1098 /* timeout */0, 1099 /* getcount_only */ FALSE); 1100 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1101 } 1102 1103 if (ds != NULL) { 1104 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 1105 devstat_end_transaction(ds, 1106 ccb->csio.dxfer_len - ccb->csio.resid, 1107 ccb->csio.tag_action & 0x3, 1108 ((ccb->ccb_h.flags & CAM_DIR_MASK) == 1109 CAM_DIR_NONE) ? DEVSTAT_NO_DATA : 1110 (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1111 DEVSTAT_WRITE : 1112 DEVSTAT_READ, NULL, starttime); 1113 } else if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1114 devstat_end_transaction(ds, 1115 ccb->ataio.dxfer_len - ccb->ataio.resid, 1116 ccb->ataio.tag_action & 0x3, 1117 ((ccb->ccb_h.flags & CAM_DIR_MASK) == 1118 CAM_DIR_NONE) ? DEVSTAT_NO_DATA : 1119 (ccb->ccb_h.flags & CAM_DIR_OUT) ? 1120 DEVSTAT_WRITE : 1121 DEVSTAT_READ, NULL, starttime); 1122 } 1123 } 1124 1125 return(error); 1126 } 1127 1128 void 1129 cam_freeze_devq(struct cam_path *path) 1130 { 1131 struct ccb_hdr ccb_h; 1132 1133 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n")); 1134 xpt_setup_ccb(&ccb_h, path, /*priority*/1); 1135 ccb_h.func_code = XPT_NOOP; 1136 ccb_h.flags = CAM_DEV_QFREEZE; 1137 xpt_action((union ccb *)&ccb_h); 1138 } 1139 1140 u_int32_t 1141 cam_release_devq(struct cam_path *path, u_int32_t relsim_flags, 1142 u_int32_t openings, u_int32_t arg, 1143 int getcount_only) 1144 { 1145 struct ccb_relsim crs; 1146 1147 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n", 1148 relsim_flags, openings, arg, getcount_only)); 1149 xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL); 1150 crs.ccb_h.func_code = XPT_REL_SIMQ; 1151 crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0; 1152 crs.release_flags = relsim_flags; 1153 crs.openings = openings; 1154 crs.release_timeout = arg; 1155 xpt_action((union ccb *)&crs); 1156 return (crs.qfrozen_cnt); 1157 } 1158 1159 #define saved_ccb_ptr ppriv_ptr0 1160 static void 1161 camperiphdone(struct cam_periph *periph, union ccb *done_ccb) 1162 { 1163 union ccb *saved_ccb; 1164 cam_status status; 1165 struct scsi_start_stop_unit *scsi_cmd; 1166 int error_code, sense_key, asc, ascq; 1167 1168 scsi_cmd = (struct scsi_start_stop_unit *) 1169 &done_ccb->csio.cdb_io.cdb_bytes; 1170 status = done_ccb->ccb_h.status; 1171 1172 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1173 if (scsi_extract_sense_ccb(done_ccb, 1174 &error_code, &sense_key, &asc, &ascq)) { 1175 /* 1176 * If the error is "invalid field in CDB", 1177 * and the load/eject flag is set, turn the 1178 * flag off and try again. This is just in 1179 * case the drive in question barfs on the 1180 * load eject flag. The CAM code should set 1181 * the load/eject flag by default for 1182 * removable media. 1183 */ 1184 if ((scsi_cmd->opcode == START_STOP_UNIT) && 1185 ((scsi_cmd->how & SSS_LOEJ) != 0) && 1186 (asc == 0x24) && (ascq == 0x00)) { 1187 scsi_cmd->how &= ~SSS_LOEJ; 1188 if (status & CAM_DEV_QFRZN) { 1189 cam_release_devq(done_ccb->ccb_h.path, 1190 0, 0, 0, 0); 1191 done_ccb->ccb_h.status &= 1192 ~CAM_DEV_QFRZN; 1193 } 1194 xpt_action(done_ccb); 1195 goto out; 1196 } 1197 } 1198 if (cam_periph_error(done_ccb, 1199 0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART) 1200 goto out; 1201 if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) { 1202 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); 1203 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1204 } 1205 } else { 1206 /* 1207 * If we have successfully taken a device from the not 1208 * ready to ready state, re-scan the device and re-get 1209 * the inquiry information. Many devices (mostly disks) 1210 * don't properly report their inquiry information unless 1211 * they are spun up. 1212 */ 1213 if (scsi_cmd->opcode == START_STOP_UNIT) 1214 xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL); 1215 } 1216 1217 /* 1218 * Perform the final retry with the original CCB so that final 1219 * error processing is performed by the owner of the CCB. 1220 */ 1221 saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr; 1222 bcopy(saved_ccb, done_ccb, sizeof(*done_ccb)); 1223 xpt_free_ccb(saved_ccb); 1224 if (done_ccb->ccb_h.cbfcnp != camperiphdone) 1225 periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG; 1226 xpt_action(done_ccb); 1227 1228 out: 1229 /* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */ 1230 cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0); 1231 } 1232 1233 /* 1234 * Generic Async Event handler. Peripheral drivers usually 1235 * filter out the events that require personal attention, 1236 * and leave the rest to this function. 1237 */ 1238 void 1239 cam_periph_async(struct cam_periph *periph, u_int32_t code, 1240 struct cam_path *path, void *arg) 1241 { 1242 switch (code) { 1243 case AC_LOST_DEVICE: 1244 cam_periph_invalidate(periph); 1245 break; 1246 default: 1247 break; 1248 } 1249 } 1250 1251 void 1252 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) 1253 { 1254 struct ccb_getdevstats cgds; 1255 1256 xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL); 1257 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1258 xpt_action((union ccb *)&cgds); 1259 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); 1260 } 1261 1262 void 1263 cam_periph_freeze_after_event(struct cam_periph *periph, 1264 struct timeval* event_time, u_int duration_ms) 1265 { 1266 struct timeval delta; 1267 struct timeval duration_tv; 1268 1269 if (!timevalisset(event_time)) 1270 return; 1271 1272 microtime(&delta); 1273 timevalsub(&delta, event_time); 1274 duration_tv.tv_sec = duration_ms / 1000; 1275 duration_tv.tv_usec = (duration_ms % 1000) * 1000; 1276 if (timevalcmp(&delta, &duration_tv, <)) { 1277 timevalsub(&duration_tv, &delta); 1278 1279 duration_ms = duration_tv.tv_sec * 1000; 1280 duration_ms += duration_tv.tv_usec / 1000; 1281 cam_freeze_devq(periph->path); 1282 cam_release_devq(periph->path, 1283 RELSIM_RELEASE_AFTER_TIMEOUT, 1284 /*reduction*/0, 1285 /*timeout*/duration_ms, 1286 /*getcount_only*/0); 1287 } 1288 1289 } 1290 1291 static int 1292 camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb, 1293 cam_flags camflags, u_int32_t sense_flags, 1294 int *openings, u_int32_t *relsim_flags, 1295 u_int32_t *timeout, u_int32_t *action, const char **action_string) 1296 { 1297 int error; 1298 1299 switch (ccb->csio.scsi_status) { 1300 case SCSI_STATUS_OK: 1301 case SCSI_STATUS_COND_MET: 1302 case SCSI_STATUS_INTERMED: 1303 case SCSI_STATUS_INTERMED_COND_MET: 1304 error = 0; 1305 break; 1306 case SCSI_STATUS_CMD_TERMINATED: 1307 case SCSI_STATUS_CHECK_COND: 1308 error = camperiphscsisenseerror(ccb, orig_ccb, 1309 camflags, 1310 sense_flags, 1311 openings, 1312 relsim_flags, 1313 timeout, 1314 action, 1315 action_string); 1316 break; 1317 case SCSI_STATUS_QUEUE_FULL: 1318 { 1319 /* no decrement */ 1320 struct ccb_getdevstats cgds; 1321 1322 /* 1323 * First off, find out what the current 1324 * transaction counts are. 1325 */ 1326 xpt_setup_ccb(&cgds.ccb_h, 1327 ccb->ccb_h.path, 1328 CAM_PRIORITY_NORMAL); 1329 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1330 xpt_action((union ccb *)&cgds); 1331 1332 /* 1333 * If we were the only transaction active, treat 1334 * the QUEUE FULL as if it were a BUSY condition. 1335 */ 1336 if (cgds.dev_active != 0) { 1337 int total_openings; 1338 1339 /* 1340 * Reduce the number of openings to 1341 * be 1 less than the amount it took 1342 * to get a queue full bounded by the 1343 * minimum allowed tag count for this 1344 * device. 1345 */ 1346 total_openings = cgds.dev_active + cgds.dev_openings; 1347 *openings = cgds.dev_active; 1348 if (*openings < cgds.mintags) 1349 *openings = cgds.mintags; 1350 if (*openings < total_openings) 1351 *relsim_flags = RELSIM_ADJUST_OPENINGS; 1352 else { 1353 /* 1354 * Some devices report queue full for 1355 * temporary resource shortages. For 1356 * this reason, we allow a minimum 1357 * tag count to be entered via a 1358 * quirk entry to prevent the queue 1359 * count on these devices from falling 1360 * to a pessimisticly low value. We 1361 * still wait for the next successful 1362 * completion, however, before queueing 1363 * more transactions to the device. 1364 */ 1365 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; 1366 } 1367 *timeout = 0; 1368 error = ERESTART; 1369 *action &= ~SSQ_PRINT_SENSE; 1370 break; 1371 } 1372 /* FALLTHROUGH */ 1373 } 1374 case SCSI_STATUS_BUSY: 1375 /* 1376 * Restart the queue after either another 1377 * command completes or a 1 second timeout. 1378 */ 1379 if ((sense_flags & SF_RETRY_BUSY) != 0 || 1380 (ccb->ccb_h.retry_count--) > 0) { 1381 error = ERESTART; 1382 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1383 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1384 *timeout = 1000; 1385 } else { 1386 error = EIO; 1387 } 1388 break; 1389 case SCSI_STATUS_RESERV_CONFLICT: 1390 default: 1391 error = EIO; 1392 break; 1393 } 1394 return (error); 1395 } 1396 1397 static int 1398 camperiphscsisenseerror(union ccb *ccb, union ccb **orig, 1399 cam_flags camflags, u_int32_t sense_flags, 1400 int *openings, u_int32_t *relsim_flags, 1401 u_int32_t *timeout, u_int32_t *action, const char **action_string) 1402 { 1403 struct cam_periph *periph; 1404 union ccb *orig_ccb = ccb; 1405 int error, recoveryccb; 1406 1407 periph = xpt_path_periph(ccb->ccb_h.path); 1408 recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone); 1409 if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) { 1410 /* 1411 * If error recovery is already in progress, don't attempt 1412 * to process this error, but requeue it unconditionally 1413 * and attempt to process it once error recovery has 1414 * completed. This failed command is probably related to 1415 * the error that caused the currently active error recovery 1416 * action so our current recovery efforts should also 1417 * address this command. Be aware that the error recovery 1418 * code assumes that only one recovery action is in progress 1419 * on a particular peripheral instance at any given time 1420 * (e.g. only one saved CCB for error recovery) so it is 1421 * imperitive that we don't violate this assumption. 1422 */ 1423 error = ERESTART; 1424 *action &= ~SSQ_PRINT_SENSE; 1425 } else { 1426 scsi_sense_action err_action; 1427 struct ccb_getdev cgd; 1428 1429 /* 1430 * Grab the inquiry data for this device. 1431 */ 1432 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL); 1433 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1434 xpt_action((union ccb *)&cgd); 1435 1436 err_action = scsi_error_action(&ccb->csio, &cgd.inq_data, 1437 sense_flags); 1438 error = err_action & SS_ERRMASK; 1439 1440 /* 1441 * Do not autostart sequential access devices 1442 * to avoid unexpected tape loading. 1443 */ 1444 if ((err_action & SS_MASK) == SS_START && 1445 SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) { 1446 *action_string = "Will not autostart a " 1447 "sequential access device"; 1448 goto sense_error_done; 1449 } 1450 1451 /* 1452 * Avoid recovery recursion if recovery action is the same. 1453 */ 1454 if ((err_action & SS_MASK) >= SS_START && recoveryccb) { 1455 if (((err_action & SS_MASK) == SS_START && 1456 ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) || 1457 ((err_action & SS_MASK) == SS_TUR && 1458 (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) { 1459 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; 1460 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1461 *timeout = 500; 1462 } 1463 } 1464 1465 /* 1466 * If the recovery action will consume a retry, 1467 * make sure we actually have retries available. 1468 */ 1469 if ((err_action & SSQ_DECREMENT_COUNT) != 0) { 1470 if (ccb->ccb_h.retry_count > 0 && 1471 (periph->flags & CAM_PERIPH_INVALID) == 0) 1472 ccb->ccb_h.retry_count--; 1473 else { 1474 *action_string = "Retries exhausted"; 1475 goto sense_error_done; 1476 } 1477 } 1478 1479 if ((err_action & SS_MASK) >= SS_START) { 1480 /* 1481 * Do common portions of commands that 1482 * use recovery CCBs. 1483 */ 1484 orig_ccb = xpt_alloc_ccb_nowait(); 1485 if (orig_ccb == NULL) { 1486 *action_string = "Can't allocate recovery CCB"; 1487 goto sense_error_done; 1488 } 1489 /* 1490 * Clear freeze flag for original request here, as 1491 * this freeze will be dropped as part of ERESTART. 1492 */ 1493 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1494 bcopy(ccb, orig_ccb, sizeof(*orig_ccb)); 1495 } 1496 1497 switch (err_action & SS_MASK) { 1498 case SS_NOP: 1499 *action_string = "No recovery action needed"; 1500 error = 0; 1501 break; 1502 case SS_RETRY: 1503 *action_string = "Retrying command (per sense data)"; 1504 error = ERESTART; 1505 break; 1506 case SS_FAIL: 1507 *action_string = "Unretryable error"; 1508 break; 1509 case SS_START: 1510 { 1511 int le; 1512 1513 /* 1514 * Send a start unit command to the device, and 1515 * then retry the command. 1516 */ 1517 *action_string = "Attempting to start unit"; 1518 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1519 1520 /* 1521 * Check for removable media and set 1522 * load/eject flag appropriately. 1523 */ 1524 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1525 le = TRUE; 1526 else 1527 le = FALSE; 1528 1529 scsi_start_stop(&ccb->csio, 1530 /*retries*/1, 1531 camperiphdone, 1532 MSG_SIMPLE_Q_TAG, 1533 /*start*/TRUE, 1534 /*load/eject*/le, 1535 /*immediate*/FALSE, 1536 SSD_FULL_SIZE, 1537 /*timeout*/50000); 1538 break; 1539 } 1540 case SS_TUR: 1541 { 1542 /* 1543 * Send a Test Unit Ready to the device. 1544 * If the 'many' flag is set, we send 120 1545 * test unit ready commands, one every half 1546 * second. Otherwise, we just send one TUR. 1547 * We only want to do this if the retry 1548 * count has not been exhausted. 1549 */ 1550 int retries; 1551 1552 if ((err_action & SSQ_MANY) != 0) { 1553 *action_string = "Polling device for readiness"; 1554 retries = 120; 1555 } else { 1556 *action_string = "Testing device for readiness"; 1557 retries = 1; 1558 } 1559 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1560 scsi_test_unit_ready(&ccb->csio, 1561 retries, 1562 camperiphdone, 1563 MSG_SIMPLE_Q_TAG, 1564 SSD_FULL_SIZE, 1565 /*timeout*/5000); 1566 1567 /* 1568 * Accomplish our 500ms delay by deferring 1569 * the release of our device queue appropriately. 1570 */ 1571 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1572 *timeout = 500; 1573 break; 1574 } 1575 default: 1576 panic("Unhandled error action %x", err_action); 1577 } 1578 1579 if ((err_action & SS_MASK) >= SS_START) { 1580 /* 1581 * Drop the priority, so that the recovery 1582 * CCB is the first to execute. Freeze the queue 1583 * after this command is sent so that we can 1584 * restore the old csio and have it queued in 1585 * the proper order before we release normal 1586 * transactions to the device. 1587 */ 1588 ccb->ccb_h.pinfo.priority--; 1589 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1590 ccb->ccb_h.saved_ccb_ptr = orig_ccb; 1591 error = ERESTART; 1592 *orig = orig_ccb; 1593 } 1594 1595 sense_error_done: 1596 *action = err_action; 1597 } 1598 return (error); 1599 } 1600 1601 /* 1602 * Generic error handler. Peripheral drivers usually filter 1603 * out the errors that they handle in a unique mannor, then 1604 * call this function. 1605 */ 1606 int 1607 cam_periph_error(union ccb *ccb, cam_flags camflags, 1608 u_int32_t sense_flags, union ccb *save_ccb) 1609 { 1610 struct cam_path *newpath; 1611 union ccb *orig_ccb, *scan_ccb; 1612 struct cam_periph *periph; 1613 const char *action_string; 1614 cam_status status; 1615 int frozen, error, openings; 1616 u_int32_t action, relsim_flags, timeout; 1617 1618 action = SSQ_PRINT_SENSE; 1619 periph = xpt_path_periph(ccb->ccb_h.path); 1620 action_string = NULL; 1621 status = ccb->ccb_h.status; 1622 frozen = (status & CAM_DEV_QFRZN) != 0; 1623 status &= CAM_STATUS_MASK; 1624 openings = relsim_flags = timeout = 0; 1625 orig_ccb = ccb; 1626 1627 switch (status) { 1628 case CAM_REQ_CMP: 1629 error = 0; 1630 action &= ~SSQ_PRINT_SENSE; 1631 break; 1632 case CAM_SCSI_STATUS_ERROR: 1633 error = camperiphscsistatuserror(ccb, &orig_ccb, 1634 camflags, sense_flags, &openings, &relsim_flags, 1635 &timeout, &action, &action_string); 1636 break; 1637 case CAM_AUTOSENSE_FAIL: 1638 error = EIO; /* we have to kill the command */ 1639 break; 1640 case CAM_UA_ABORT: 1641 case CAM_UA_TERMIO: 1642 case CAM_MSG_REJECT_REC: 1643 /* XXX Don't know that these are correct */ 1644 error = EIO; 1645 break; 1646 case CAM_SEL_TIMEOUT: 1647 if ((camflags & CAM_RETRY_SELTO) != 0) { 1648 if (ccb->ccb_h.retry_count > 0 && 1649 (periph->flags & CAM_PERIPH_INVALID) == 0) { 1650 ccb->ccb_h.retry_count--; 1651 error = ERESTART; 1652 1653 /* 1654 * Wait a bit to give the device 1655 * time to recover before we try again. 1656 */ 1657 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1658 timeout = periph_selto_delay; 1659 break; 1660 } 1661 action_string = "Retries exhausted"; 1662 } 1663 /* FALLTHROUGH */ 1664 case CAM_DEV_NOT_THERE: 1665 error = ENXIO; 1666 action = SSQ_LOST; 1667 break; 1668 case CAM_REQ_INVALID: 1669 case CAM_PATH_INVALID: 1670 case CAM_NO_HBA: 1671 case CAM_PROVIDE_FAIL: 1672 case CAM_REQ_TOO_BIG: 1673 case CAM_LUN_INVALID: 1674 case CAM_TID_INVALID: 1675 case CAM_FUNC_NOTAVAIL: 1676 error = EINVAL; 1677 break; 1678 case CAM_SCSI_BUS_RESET: 1679 case CAM_BDR_SENT: 1680 /* 1681 * Commands that repeatedly timeout and cause these 1682 * kinds of error recovery actions, should return 1683 * CAM_CMD_TIMEOUT, which allows us to safely assume 1684 * that this command was an innocent bystander to 1685 * these events and should be unconditionally 1686 * retried. 1687 */ 1688 case CAM_REQUEUE_REQ: 1689 /* Unconditional requeue if device is still there */ 1690 if (periph->flags & CAM_PERIPH_INVALID) { 1691 action_string = "Periph was invalidated"; 1692 error = EIO; 1693 } else if (sense_flags & SF_NO_RETRY) { 1694 error = EIO; 1695 action_string = "Retry was blocked"; 1696 } else { 1697 error = ERESTART; 1698 action &= ~SSQ_PRINT_SENSE; 1699 } 1700 break; 1701 case CAM_RESRC_UNAVAIL: 1702 /* Wait a bit for the resource shortage to abate. */ 1703 timeout = periph_noresrc_delay; 1704 /* FALLTHROUGH */ 1705 case CAM_BUSY: 1706 if (timeout == 0) { 1707 /* Wait a bit for the busy condition to abate. */ 1708 timeout = periph_busy_delay; 1709 } 1710 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1711 /* FALLTHROUGH */ 1712 case CAM_ATA_STATUS_ERROR: 1713 case CAM_REQ_CMP_ERR: 1714 case CAM_CMD_TIMEOUT: 1715 case CAM_UNEXP_BUSFREE: 1716 case CAM_UNCOR_PARITY: 1717 case CAM_DATA_RUN_ERR: 1718 default: 1719 if (periph->flags & CAM_PERIPH_INVALID) { 1720 error = EIO; 1721 action_string = "Periph was invalidated"; 1722 } else if (ccb->ccb_h.retry_count == 0) { 1723 error = EIO; 1724 action_string = "Retries exhausted"; 1725 } else if (sense_flags & SF_NO_RETRY) { 1726 error = EIO; 1727 action_string = "Retry was blocked"; 1728 } else { 1729 ccb->ccb_h.retry_count--; 1730 error = ERESTART; 1731 } 1732 break; 1733 } 1734 1735 if ((sense_flags & SF_PRINT_ALWAYS) || 1736 CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO)) 1737 action |= SSQ_PRINT_SENSE; 1738 else if (sense_flags & SF_NO_PRINT) 1739 action &= ~SSQ_PRINT_SENSE; 1740 if ((action & SSQ_PRINT_SENSE) != 0) 1741 cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL); 1742 if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) { 1743 if (error != ERESTART) { 1744 if (action_string == NULL) 1745 action_string = "Unretryable error"; 1746 xpt_print(ccb->ccb_h.path, "Error %d, %s\n", 1747 error, action_string); 1748 } else if (action_string != NULL) 1749 xpt_print(ccb->ccb_h.path, "%s\n", action_string); 1750 else 1751 xpt_print(ccb->ccb_h.path, "Retrying command\n"); 1752 } 1753 1754 if ((action & SSQ_LOST) != 0) { 1755 lun_id_t lun_id; 1756 1757 /* 1758 * For a selection timeout, we consider all of the LUNs on 1759 * the target to be gone. If the status is CAM_DEV_NOT_THERE, 1760 * then we only get rid of the device(s) specified by the 1761 * path in the original CCB. 1762 */ 1763 if (status == CAM_SEL_TIMEOUT) 1764 lun_id = CAM_LUN_WILDCARD; 1765 else 1766 lun_id = xpt_path_lun_id(ccb->ccb_h.path); 1767 1768 /* Should we do more if we can't create the path?? */ 1769 if (xpt_create_path(&newpath, periph, 1770 xpt_path_path_id(ccb->ccb_h.path), 1771 xpt_path_target_id(ccb->ccb_h.path), 1772 lun_id) == CAM_REQ_CMP) { 1773 1774 /* 1775 * Let peripheral drivers know that this 1776 * device has gone away. 1777 */ 1778 xpt_async(AC_LOST_DEVICE, newpath, NULL); 1779 xpt_free_path(newpath); 1780 } 1781 } 1782 1783 /* Broadcast UNIT ATTENTIONs to all periphs. */ 1784 if ((action & SSQ_UA) != 0) 1785 xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb); 1786 1787 /* Rescan target on "Reported LUNs data has changed" */ 1788 if ((action & SSQ_RESCAN) != 0) { 1789 if (xpt_create_path(&newpath, NULL, 1790 xpt_path_path_id(ccb->ccb_h.path), 1791 xpt_path_target_id(ccb->ccb_h.path), 1792 CAM_LUN_WILDCARD) == CAM_REQ_CMP) { 1793 1794 scan_ccb = xpt_alloc_ccb_nowait(); 1795 if (scan_ccb != NULL) { 1796 scan_ccb->ccb_h.path = newpath; 1797 scan_ccb->ccb_h.func_code = XPT_SCAN_TGT; 1798 scan_ccb->crcn.flags = 0; 1799 xpt_rescan(scan_ccb); 1800 } else { 1801 xpt_print(newpath, 1802 "Can't allocate CCB to rescan target\n"); 1803 xpt_free_path(newpath); 1804 } 1805 } 1806 } 1807 1808 /* Attempt a retry */ 1809 if (error == ERESTART || error == 0) { 1810 if (frozen != 0) 1811 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1812 if (error == ERESTART) 1813 xpt_action(ccb); 1814 if (frozen != 0) 1815 cam_release_devq(ccb->ccb_h.path, 1816 relsim_flags, 1817 openings, 1818 timeout, 1819 /*getcount_only*/0); 1820 } 1821 1822 return (error); 1823 } 1824