/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <sys/sbuf.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 union ccb **orig_ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout,
						 int *print,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						union ccb **orig_ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout,
						int *print,
						const char **action_string);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

static MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
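
/*
 * Example (illustrative): since the delays above are exported as
 * tunables, they can be adjusted from loader.conf(5) without a rebuild,
 * e.g. to back off for two seconds after a selection timeout:
 *
 *	kern.cam.periph_selto_delay="2000"
 */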

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
	/*
	 * If the driver is marked as early, or we are already past early
	 * initialization, initialize it now.
	 */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}

void
periphdriver_init(int level)
{
	int i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
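
/*
 * Example (hypothetical sketch): a minimal peripheral driver "foo"
 * registers itself with periphdriver_register() through the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h.  The "foo" names are
 * assumptions for illustration, not a real driver.
 */
static periph_init_t	fooinit;

static struct periph_driver foodriver =
{
	fooinit, "foo",
	TAILQ_HEAD_INITIALIZER(foodriver.units), /* generation */ 0,
	/* flags */ 0
};

PERIPHDRIVER_DECLARE(foo, foodriver);

static void
fooinit(void)
{
	/* Driver-wide setup: register async callbacks, quirks, etc. */
}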

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {
		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected flags %#x "
			       "refcount %d\n", periph->periph_name,
			       periph->unit_number, periph->flags,
			       periph->refcount);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT|M_ZERO);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		xpt_unlock_buses();
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph created\n"));

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));
		xpt_remove_periph(periph, /*topology_lock_held*/ 0);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("%s: Unknown init level", __func__);
	}
	return (status);
}
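
/*
 * Example (hypothetical sketch): cam_periph_alloc() is normally called
 * from a driver's async callback when a new device is announced.  The
 * "foo*" entry points are assumed driver routines, not part of this
 * file.
 */
static periph_ctor_t	fooregister;
static periph_oninv_t	foooninvalidate;
static periph_dtor_t	foocleanup;
static periph_start_t	foostart;

static void
fooasync(void *callback_arg, u_int32_t code,
	 struct cam_path *path, void *arg)
{
	cam_status status;

	switch (code) {
	case AC_FOUND_DEVICE:
		status = cam_periph_alloc(fooregister, foooninvalidate,
		    foocleanup, foostart, "foo", CAM_PERIPH_BIO, path,
		    fooasync, AC_FOUND_DEVICE, arg);
		if (status != CAM_REQ_CMP && status != CAM_REQ_INPROG)
			printf("fooasync: unable to attach new device, "
			    "status %#x\n", status);
		break;
	default:
		break;
	}
}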

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) name.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				mtx_assert(periph->sim->mtx, MA_OWNED);
				return (periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return (NULL);
		}
	}
	xpt_unlock_buses();
	return (NULL);
}

/*
 * Find peripheral driver instances attached to the specified path.
 */
int
cam_periph_list(struct cam_path *path, struct sbuf *sb)
{
	struct sbuf local_sb;
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int count;
	int sbuf_alloc_len;

	sbuf_alloc_len = 16;
retry:
	sbuf_new(&local_sb, NULL, sbuf_alloc_len, SBUF_FIXEDLEN);
	count = 0;
	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) != 0)
				continue;

			if (sbuf_len(&local_sb) != 0)
				sbuf_cat(&local_sb, ",");

			sbuf_printf(&local_sb, "%s%d", periph->periph_name,
				    periph->unit_number);

			if (sbuf_error(&local_sb) == ENOMEM) {
				sbuf_alloc_len *= 2;
				xpt_unlock_buses();
				sbuf_delete(&local_sb);
				goto retry;
			}
			count++;
		}
	}
	xpt_unlock_buses();
	sbuf_finish(&local_sb);
	sbuf_cpy(sb, sbuf_data(&local_sb));
	sbuf_delete(&local_sb);
	return (count);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	cam_status status;

	status = CAM_REQ_CMP_ERR;
	if (periph == NULL)
		return (status);

	xpt_lock_buses();
	if ((periph->flags & CAM_PERIPH_INVALID) == 0) {
		periph->refcount++;
		status = CAM_REQ_CMP;
	}
	xpt_unlock_buses();

	return (status);
}

void
cam_periph_release_locked_buses(struct cam_periph *periph)
{
	if (periph->refcount != 0) {
		periph->refcount--;
	} else {
		panic("%s: release of %p when refcount is zero", __func__,
		      periph);
	}
	if (periph->refcount == 0
	    && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	cam_periph_release_locked_buses(periph);
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}
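
/*
 * Example (hypothetical sketch): taking a reference before touching a
 * peripheral from a context that does not already hold one, so the
 * periph cannot be freed underneath us.  cam_periph_lock()/unlock()
 * are the wrappers from cam_periph.h around the sim mutex.
 */
static void
foo_poll(struct cam_periph *periph)
{

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;		/* periph was already invalidated */
	cam_periph_lock(periph);
	/* ... inspect softc state or issue CCBs here ... */
	cam_periph_unlock(periph);
	cam_periph_release(periph);
}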

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
		if (periph->flags & CAM_PERIPH_INVALID) {
			cam_periph_release_locked(periph);
			return (ENXIO);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
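
/*
 * Example (hypothetical sketch): serializing an open against other
 * holders of the peripheral.  PRIBIO | PCATCH lets the sleep inside
 * cam_periph_hold() be interrupted by a signal.
 */
static int
foo_open(struct cam_periph *periph)
{
	int error;

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
		cam_periph_unlock(periph);
		return (error);
	}
	/* ... exclusive setup: media checks, geometry, etc. ... */
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}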

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
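
/*
 * Example (illustrative): wiring unit da4 to a specific bus, target and
 * LUN via loader hints.  With these entries camperiphunit() returns 4
 * for the matching device, and camperiphnextunit() skips 4 when
 * numbering everything else:
 *
 *	hint.da.4.at="scbus0"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */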

void
cam_periph_invalidate(struct cam_periph *periph)
{

	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph invalidated\n"));
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	/*
	 * We need to set this flag before dropping the topology lock, to
	 * let anyone who is traversing the list know that this peripheral
	 * is about to be freed, and that there will be no more reference
	 * count checks.
	 */
	periph->flags |= CAM_PERIPH_FREE;

	/*
	 * The peripheral destructor semantics dictate calling with only the
	 * SIM mutex held.  Since it might sleep, it should not be called
	 * with the topology lock held.
	 */
	xpt_unlock_buses();

	/*
	 * We need to call the peripheral destructor prior to removing the
	 * peripheral from the list.  Otherwise, we risk running into a
	 * scenario where the peripheral unit number may get reused
	 * (because it has been removed from the list), but some resources
	 * used by the peripheral are still hanging around.  In particular,
	 * the devfs nodes used by some peripherals like the pass(4) driver
	 * aren't fully cleaned up until the destructor is run.  If the
	 * unit number is reused before the devfs instance is fully gone,
	 * devfs will panic.
	 */
	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	/*
	 * The peripheral list is protected by the topology lock.
	 */
	xpt_lock_buses();

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;

	xpt_remove_periph(periph, /*topology_lock_held*/ 1);

	xpt_unlock_buses();
	CAM_DEBUG(periph->path, CAM_DEBUG_INFO, ("Periph destroyed\n"));

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers; for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	/* Some controllers may not be able to handle more data. */
	size_t maxmap = DFLTPHYS;

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return (EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, so there is no
		 * reason to be so strict.  vmapbuf() is able to map up to
		 * MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return (0);

		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_DEV_ADVINFO:
		if (ccb->cdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		lengths[0] = ccb->cdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, so there is no
		 * reason to be so strict.  vmapbuf() is able to map up to
		 * MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return (EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap) {
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return (E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return (EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return (0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(mapinfo->num_bufs_used, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_DEV_ADVINFO:
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cdai.buf;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
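
/*
 * Example (hypothetical sketch): the usual pairing of the two functions
 * above on an ioctl path that executes a user-supplied CCB.
 * "foo_runccb" is an assumed helper standing in for however the driver
 * actually submits the CCB.
 */
static int	foo_runccb(struct cam_periph *periph, union ccb *ccb);

static int
foo_user_ccb(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));
	error = cam_periph_mapmem(ccb, &mapinfo);
	if (error != 0)
		return (error);
	error = foo_runccb(periph, ccb);
	cam_periph_unmapmem(ccb, &mapinfo);
	return (error);
}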

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch (cmd) {
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0) {
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len,
					ccb->ataio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		}
	}

	return (error);
}
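
/*
 * Example (hypothetical sketch): issuing a synchronous TEST UNIT READY
 * with the helpers above, called with the sim mutex held.  "fooerror"
 * (defined at the end of this file) is an assumed error routine, and
 * "foodone" follows the usual pattern of waking the thread sleeping in
 * cam_periph_ccbwait().
 */
static int	fooerror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags);

static void
foodone(struct cam_periph *periph, union ccb *done_ccb)
{

	/* Wake up the caller sleeping in cam_periph_ccbwait(). */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
foo_test_ready(struct cam_periph *periph)
{
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/4,
			     foodone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);
	error = cam_periph_runccb(ccb, fooerror, /*camflags*/0,
	    /*sense_flags*/SF_RETRY_UA, /*devstat*/NULL);
	xpt_release_ccb(ccb);
	return (error);
}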

void
cam_freeze_devq(struct cam_path *path)
{

	cam_freeze_devq_arg(path, 0, 0);
}

void
cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
	crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
	crs.release_flags = flags;
	crs.openings = arg;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
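
/*
 * Example (hypothetical sketch): a 100ms device-queue "cooldown" built
 * from the two calls above; cam_periph_freeze_after_event() below uses
 * the same freeze/release-after-timeout idiom.
 */
static void
foo_cooldown(struct cam_path *path)
{

	cam_freeze_devq(path);
	cam_release_devq(path, RELSIM_RELEASE_AFTER_TIMEOUT,
	    /*reduction*/0, /*timeout*/100, /*getcount_only*/0);
}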

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	struct scsi_start_stop_unit *scsi_cmd;
	int		error_code, sense_key, asc, ascq;

	scsi_cmd = (struct scsi_start_stop_unit *)
	    &done_ccb->csio.cdb_io.cdb_bytes;
	status = done_ccb->ccb_h.status;

	if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		if (scsi_extract_sense_ccb(done_ccb,
		    &error_code, &sense_key, &asc, &ascq)) {
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00)) {
				scsi_cmd->how &= ~SSS_LOEJ;
				if (status & CAM_DEV_QFRZN) {
					cam_release_devq(done_ccb->ccb_h.path,
					    0, 0, 0, 0);
					done_ccb->ccb_h.status &=
					    ~CAM_DEV_QFRZN;
				}
				xpt_action(done_ccb);
				goto out;
			}
		}
		if (cam_periph_error(done_ccb,
		    0, SF_RETRY_UA | SF_NO_PRINT, NULL) == ERESTART)
			goto out;
		if (done_ccb->ccb_h.status & CAM_DEV_QFRZN) {
			cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
			done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		}
	} else {
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED, done_ccb->ccb_h.path, NULL);
	}

	/*
	 * Perform the final retry with the original CCB so that final
	 * error processing is performed by the owner of the CCB.
	 */
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
	xpt_free_ccb(saved_ccb);
	if (done_ccb->ccb_h.cbfcnp != camperiphdone)
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
	xpt_action(done_ccb);

out:
	/* Drop freeze taken due to CAM_DEV_QFREEZE flag set. */
	cam_release_devq(done_ccb->ccb_h.path, 0, 0, 0, 0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	if (!timevalisset(event_time))
		return;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}
}
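
/*
 * Example (hypothetical sketch): an async callback that settles the bus
 * itself after reset events (5000ms is an assumed settle time) and
 * hands everything else to cam_periph_async() above.
 */
static void
foo_filter_async(void *callback_arg, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	struct cam_periph *periph = (struct cam_periph *)callback_arg;

	switch (code) {
	case AC_SENT_BDR:
	case AC_BUS_RESET:
		cam_periph_bus_settle(periph, /*bus_settle_ms*/5000);
		break;
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}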

static int
camperiphscsistatuserror(union ccb *ccb, union ccb **orig_ccb,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, int *print, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb, orig_ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						print,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			*print = 0;
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
	default:
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, union ccb **orig,
    cam_flags camflags, u_int32_t sense_flags,
    int *openings, u_int32_t *relsim_flags,
    u_int32_t *timeout, int *print, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error, recoveryccb;

	periph = xpt_path_periph(ccb->ccb_h.path);
	recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
	if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
		*print = 0;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		err_action = scsi_error_action(&ccb->csio, &cgd.inq_data,
		    sense_flags);
		error = err_action & SS_ERRMASK;

		/*
		 * Do not autostart sequential access devices
		 * to avoid unexpected tape loading.
		 */
		if ((err_action & SS_MASK) == SS_START &&
		    SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
			*action_string = "Will not autostart a "
			    "sequential access device";
			goto sense_error_done;
		}

		/*
		 * Avoid recovery recursion if recovery action is the same.
		 */
		if ((err_action & SS_MASK) >= SS_START && recoveryccb) {
			if (((err_action & SS_MASK) == SS_START &&
			     ccb->csio.cdb_io.cdb_bytes[0] == START_STOP_UNIT) ||
			    ((err_action & SS_MASK) == SS_TUR &&
			     (ccb->csio.cdb_io.cdb_bytes[0] == TEST_UNIT_READY))) {
				err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;
				*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				*timeout = 500;
			}
		}

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			error = ERESTART;
			*orig = orig_ccb;
		}

sense_error_done:
		*print = ((err_action & SSQ_PRINT_SENSE) != 0);
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	union ccb  *orig_ccb;
	struct cam_periph *periph;
	const char *action_string;
	cam_status  status;
	int	    frozen, error, openings, print, lost_device;
	int	    error_code, sense_key, asc, ascq;
	u_int32_t   relsim_flags, timeout;

	print = 1;
	periph = xpt_path_periph(ccb->ccb_h.path);
	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = timeout = lost_device = 0;
	orig_ccb = ccb;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		print = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb, &orig_ccb,
		    camflags, sense_flags, &openings, &relsim_flags,
		    &timeout, &print, &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0 &&
			    (periph->flags & CAM_PERIPH_INVALID) == 0) {
				ccb->ccb_h.retry_count--;
				error = ERESTART;

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
			action_string = "Retries exhausted";
		}
		/* FALLTHROUGH */
	case CAM_DEV_NOT_THERE:
		error = ENXIO;
		print = 0;
		lost_device = 1;
		break;
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue if device is still there */
		if (periph->flags & CAM_PERIPH_INVALID) {
			action_string = "Periph was invalidated";
			error = EIO;
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			error = ERESTART;
			print = 0;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	case CAM_ATA_STATUS_ERROR:
	case CAM_REQ_CMP_ERR:
	case CAM_CMD_TIMEOUT:
	case CAM_UNEXP_BUSFREE:
	case CAM_UNCOR_PARITY:
	case CAM_DATA_RUN_ERR:
	default:
		if (periph->flags & CAM_PERIPH_INVALID) {
			error = EIO;
			action_string = "Periph was invalidated";
		} else if (ccb->ccb_h.retry_count == 0) {
			error = EIO;
			action_string = "Retries exhausted";
		} else if (sense_flags & SF_NO_RETRY) {
			error = EIO;
			action_string = "Retry was blocked";
		} else {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		}
		break;
	}

	if ((sense_flags & SF_PRINT_ALWAYS) ||
	    CAM_DEBUGGED(ccb->ccb_h.path, CAM_DEBUG_INFO))
		print = 1;
	else if (sense_flags & SF_NO_PRINT)
		print = 0;
	if (print)
		cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	if (error != 0 && print) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	if (lost_device) {
		struct cam_path *newpath;
		lun_id_t lun_id;

		/*
		 * For a selection timeout, we consider all of the LUNs on
		 * the target to be gone.  If the status is CAM_DEV_NOT_THERE,
		 * then we only get rid of the device(s) specified by the
		 * path in the original CCB.
		 */
		if (status == CAM_DEV_NOT_THERE)
			lun_id = xpt_path_lun_id(ccb->ccb_h.path);
		else
			lun_id = CAM_LUN_WILDCARD;

		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, periph,
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    lun_id) == CAM_REQ_CMP) {

			/*
			 * Let peripheral drivers know that this
			 * device has gone away.
			 */
			xpt_async(AC_LOST_DEVICE, newpath, NULL);
			xpt_free_path(newpath);
		}

	/* Broadcast UNIT ATTENTIONs to all periphs. */
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq) &&
	    sense_key == SSD_KEY_UNIT_ATTENTION) {
		xpt_async(AC_UNIT_ATTENTION, orig_ccb->ccb_h.path, orig_ccb);
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
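
/*
 * Example (hypothetical sketch): the "fooerror" routine referenced in
 * the synchronous-command example earlier.  Most peripheral drivers
 * special-case a few errors themselves and then defer to
 * cam_periph_error() like this.
 */
static int
fooerror(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
{

	return (cam_periph_error(ccb, camflags, sense_flags,
	    /*save_ccb*/NULL));
}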