/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 union ccb **orig_ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout,
						 u_int32_t *action,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						union ccb **orig_ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout,
						u_int32_t *action,
						const char **action_string);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
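/*
 * A sketch of how the delays above can be tuned from loader.conf(5).  The
 * variable names match the TUNABLE_INT() declarations; the millisecond
 * values are only illustrative:
 *
 *	kern.cam.periph_selto_delay="2000"
 *	kern.cam.periph_noresrc_delay="750"
 *	kern.cam.periph_busy_delay="750"
 */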

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

again:
	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	xpt_lock_buses();
	if (ndrivers != nperiph_drivers + 2) {
		/*
		 * Lost race against itself; go around.
		 */
		xpt_unlock_buses();
		free(newdrivers, M_CAMPERIPH);
		goto again;
	}
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	nperiph_drivers++;
	xpt_unlock_buses();
	if (old)
		free(old, M_CAMPERIPH);
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}

void
periphdriver_init(int level)
{
	int	i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
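/*
 * Peripheral drivers do not normally call periphdriver_register()
 * directly; they declare themselves with the PERIPHDRIVER_DECLARE()
 * macro from cam_periph.h, which registers the driver via SYSINIT.
 * A minimal sketch for a hypothetical "foo" driver:
 *
 *	static periph_init_t fooinit;
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */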
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->scheduled_priority = CAM_PRIORITY_NONE;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 1;		/* Dropped by invalidation. */
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return(status);
}
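/*
 * A sketch of the usual caller: a peripheral driver's async handler
 * allocates a new instance when a device is found.  The "foo" names are
 * illustrative, not an actual driver:
 *
 *	static void
 *	fooasync(void *callback_arg, u_int32_t code,
 *	    struct cam_path *path, void *arg)
 *	{
 *		...
 *		case AC_FOUND_DEVICE:
 *			status = cam_periph_alloc(fooregister,
 *			    foooninvalidate, foocleanup, foostart, "foo",
 *			    CAM_PERIPH_BIO, path, fooasync,
 *			    AC_FOUND_DEVICE, cgd);
 *			...
 *	}
 */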
/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				cam_periph_assert(periph, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
				    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	sbuf_cpy(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	cam_status status;

	status = CAM_REQ_CMP_ERR;
	if (periph == NULL)
		return (status);

	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = CAM_REQ_CMP;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_doacquire(struct cam_periph *periph)
{

	xpt_lock_buses();
	KASSERT(periph->refcount >= 1,
	    ("cam_periph_doacquire() with refcount == %d", periph->refcount));
	periph->refcount++;
	xpt_unlock_buses();
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->refcount >= 1, ("periph->refcount >= 1"));
	if (--periph->refcount == 0)
		camperiphfree(periph);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct mtx *mtx;

	if (periph == NULL)
		return;

	cam_periph_assert(periph, MA_NOTOWNED);
	mtx = cam_periph_mtx(periph);
	mtx_lock(mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(mtx);
}
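/*
 * A sketch of the common reference counting pattern: a driver takes a
 * reference in its open routine and drops it in close, so the peripheral
 * cannot be freed while userland holds it open.  "foo" is illustrative:
 *
 *	static int
 *	fooopen(struct cdev *dev, int flags, int fmt, struct thread *td)
 *	{
 *		struct cam_periph *periph = dev->si_drv1;
 *
 *		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *			return (ENXIO);
 *		...
 *	}
 *
 *	static int
 *	fooclose(struct cdev *dev, int flag, int fmt, struct thread *td)
 *	{
 *		struct cam_periph *periph = dev->si_drv1;
 *		...
 *		cam_periph_release(periph);
 *		return (0);
 *	}
 */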
int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	cam_periph_assert(periph, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = cam_periph_sleep(periph, periph, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
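/*
 * A sketch of how hold/unhold brackets an operation that must not run
 * concurrently with other accesses to the device; the error check shown
 * is the minimum required:
 *
 *	if ((error = cam_periph_hold(periph, PRIBIO)) != 0)
 *		return (error);
 *	... issue exclusive-use commands ...
 *	cam_periph_unhold(periph);
 */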
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
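/*
 * A sketch of the device.hints(5) entries the wiring code above matches;
 * these pin unit da4 to the given bus/target/lun (values illustrative):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */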
void
cam_periph_invalidate(struct cam_periph *periph)
{

	cam_periph_assert(periph, MA_OWNED);
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if ((periph->flags & CAM_PERIPH_INVALID) != 0)
		return;

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_denounce_periph(periph);
	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;
	if (periph->periph_oninval != NULL)
		periph->periph_oninval(periph);
	cam_periph_release_locked(periph);
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	cam_periph_assert(periph, MA_OWNED);
	KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
	    periph->periph_name, periph->unit_number));
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;

	xpt_remove_periph(periph);

	xpt_unlock_buses();
	if ((periph->flags & CAM_PERIPH_ANNOUNCED) && !rebooting)
		xpt_print(periph->path, "Periph destroyed\n");
	else
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
		    periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This is now a generic function that centralizes most
 * of the sanity checks on the data flags, if any.
 * This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo,
    u_int maxmap)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	if (maxmap == 0)
		maxmap = DFLTPHYS;	/* traditional default */
	else if (maxmap > MAXPHYS)
		maxmap = MAXPHYS;	/* for safety */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);
		if ((ccb->ccb_h.flags & CAM_DATA_MASK) != CAM_DATA_VADDR)
			return (EINVAL);
		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/*
	 * This keeps the kernel stack of the current thread from getting
	 * swapped.  In low-memory situations where the kernel stack might
	 * otherwise get swapped out, this holds it and allows the thread
	 * to make progress and release the kernel mapped pages sooner.
	 *
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* save the user's data address */
		mapinfo->bp[i]->b_caller1 = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i], 1) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_caller1;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}
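/*
 * A sketch of how an ioctl path pairs the map/unmap calls around CCB
 * dispatch (roughly what pass(4) does); "maxio" is illustrative and
 * error handling is elided:
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo, maxio);
 *	if (error != 0)
 *		return (error);
 *	... run the CCB, e.g. via cam_periph_runccb() ...
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */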
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* nothing to free and the process wasn't held. */
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(mapinfo->num_bufs_used, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_DEV_ADVINFO:
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_caller1;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

void
cam_periph_ccbwait(union ccb *ccb)
{

	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		xpt_path_sleep(ccb->ccb_h.path, &ccb->ccb_h.cbfcnp, PRIBIO,
		    "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb	*ccb;
	int		error;
	int		found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

static void
cam_periph_done(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Caller will release the CCB */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct bintime *starttime;
	struct bintime ltime;
	int error;

	starttime = NULL;
	xpt_path_assert(ccb->ccb_h.path, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO)) {
		starttime = &ltime;
		binuptime(starttime);
		devstat_start_transaction(ds, starttime);
	}

	ccb->ccb_h.cbfcnp = cam_periph_done;
	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len - ccb->csio.resid,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, starttime);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len - ccb->ataio.resid,
					ccb->ataio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, starttime);
		}
	}

	return(error);
}
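/*
 * A sketch of a typical synchronous command issued through
 * cam_periph_runccb(); "fooerror" is the driver's error callback and the
 * names are illustrative:
 *
 *	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
 *	scsi_test_unit_ready(&ccb->csio, 5, NULL, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 60 * 1000);
 *	error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA, softc->disk->d_devstat);
 *	xpt_release_ccb(ccb);
 */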
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_freeze_devq\n"));
	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("cam_release_devq(%u, %u, %u, %d)\n",
	    relsim_flags, openings, arg, getcount_only));
	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	struct scsi_start_stop_unit *scsi_cmd;
	int		error_code, sense_key, asc, ascq;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		if (cam_periph_error(done_ccb,
		    0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/*
	 * Perform the final retry with the original CCB so that final
	 * error processing is performed by the owner of the CCB.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}
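/*
 * A sketch of the freeze/release pairing: a caller freezes the device
 * queue around an operation and then releases it, optionally after a
 * timeout, which is the same pattern cam_periph_freeze_after_event()
 * below uses (the 100ms value is illustrative):
 *
 *	cam_freeze_devq(periph->path);
 *	... quiesce or reconfigure the device ...
 *	cam_release_devq(periph->path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, 100, 0);
 */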
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
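/*
 * Continuing the fooasync() sketch from cam_periph_alloc() above: events
 * the driver does not handle itself are delegated to cam_periph_async(),
 * which takes care of AC_LOST_DEVICE by invalidating the instance:
 *
 *	default:
 *		cam_periph_async(periph, code, path, arg);
 *		break;
 */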
static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*action &= ~SSQ_PRINT_SENSE;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if ((sense_flags & SF_RETRY_BUSY) != 0 ||
		    (ccb->ccb_h.retry_count--) > 0) {
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, u_int32_t *action, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*action &= ~SSQ_PRINT_SENSE;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*action = err_action;
	}
	return (error);
}
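/*
 * A sketch of the usual caller of cam_periph_error() below: a driver's
 * done routine funnels failed CCBs through a small wrapper ("fooerror"
 * is illustrative; passing NULL for save_ccb matches what camperiphdone()
 * above does):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		...
 *		return (cam_periph_error(ccb, cam_flags, sense_flags, NULL));
 *	}
 *
 * and calls it from the completion path as
 *
 *	error = fooerror(done_ccb, CAM_RETRY_SELTO,
 *	    SF_RETRY_UA | SF_NO_PRINT);
 *
 * An ERESTART return means the CCB was re-queued by cam_periph_error().
 */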
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	struct cam_path *newpath;
	union ccb  *orig_ccb, *scan_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status  status;
	int	    frozen, error, openings;
	u_int32_t   action, relsim_flags, timeout;

	action = SSQ_PRINT_SENSE;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = timeout = 0;
	orig_ccb = ccb;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		action &= ~SSQ_PRINT_SENSE;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &action, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		action = SSQ_LOST;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
	case CAM_FUNC_NOTAVAIL:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = EIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			action &= ~SSQ_PRINT_SENSE;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		action |= SSQ_PRINT_SENSE;
	else if (sense_flags & SF_NO_PRINT)
		action &= ~SSQ_PRINT_SENSE;
	if ((action & SSQ_PRINT_SENSE) != 0)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && (action & SSQ_PRINT_SENSE) != 0) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	if ((action & SSQ_LOST) != 0) {
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_SEL_TIMEOUT)
			lun_id = CAM_LUN_WILDCARD;
		else
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {

			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}
	}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	if ((action & SSQ_UA) != 0)
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);

	/* Rescan target on "Reported LUNs data has changed" */
	if ((action & SSQ_RESCAN) != 0) {
		if (xpt_create_path(&newpath, NULL,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) == CAM_REQ_CMP) {

			scan_ccb = xpt_alloc_ccb_nowait();
			if (scan_ccb != NULL) {
				scan_ccb->ccb_h.path = newpath;
				scan_ccb->ccb_h.func_code = XPT_SCAN_TGT;
				scan_ccb->crcn.flags = 0;
				xpt_rescan(scan_ccb);
			} else {
				xpt_print(newpath,
				    "Can't allocate CCB to rescan target\n");
				xpt_free_path(newpath);
			}
		}
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}