/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int	camperiphnextunit(struct periph_driver *p_drv,
				  u_int newunit, int wired,
				  path_id_t pathid, target_id_t target,
				  lun_id_t lun);
static	u_int	camperiphunit(struct periph_driver *p_drv,
			      path_id_t pathid, target_id_t target,
			      lun_id_t lun);
static	void	camperiphdone(struct cam_periph *periph,
			      union ccb *done_ccb);
static	void	camperiphfree(struct cam_periph *periph);
static	int	camperiphscsistatuserror(union ccb *ccb,
					 cam_flags camflags,
					 u_int32_t sense_flags,
					 int *openings,
					 u_int32_t *relsim_flags,
					 u_int32_t *timeout,
					 const char **action_string);
static	int	camperiphscsisenseerror(union ccb *ccb,
					cam_flags camflags,
					u_int32_t sense_flags,
					int *openings,
					u_int32_t *relsim_flags,
					u_int32_t *timeout,
					const char **action_string);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

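/*
 * Register a new peripheral driver type with CAM and, if the
 * initialization stage it requires has already run, initialize it now.
 */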
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); 97 98 99 void 100 periphdriver_register(void *data) 101 { 102 struct periph_driver *drv = (struct periph_driver *)data; 103 struct periph_driver **newdrivers, **old; 104 int ndrivers; 105 106 ndrivers = nperiph_drivers + 2; 107 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH, 108 M_WAITOK); 109 if (periph_drivers) 110 bcopy(periph_drivers, newdrivers, 111 sizeof(*newdrivers) * nperiph_drivers); 112 newdrivers[nperiph_drivers] = drv; 113 newdrivers[nperiph_drivers + 1] = NULL; 114 old = periph_drivers; 115 periph_drivers = newdrivers; 116 if (old) 117 free(old, M_CAMPERIPH); 118 nperiph_drivers++; 119 /* If driver marked as early or it is late now, initialize it. */ 120 if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) || 121 initialized > 1) 122 (*drv->init)(); 123 } 124 125 void 126 periphdriver_init(int level) 127 { 128 int i, early; 129 130 initialized = max(initialized, level); 131 for (i = 0; periph_drivers[i] != NULL; i++) { 132 early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2; 133 if (early == initialized) 134 (*periph_drivers[i]->init)(); 135 } 136 } 137 138 cam_status 139 cam_periph_alloc(periph_ctor_t *periph_ctor, 140 periph_oninv_t *periph_oninvalidate, 141 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 142 char *name, cam_periph_type type, struct cam_path *path, 143 ac_callback_t *ac_callback, ac_code code, void *arg) 144 { 145 struct periph_driver **p_drv; 146 struct cam_sim *sim; 147 struct cam_periph *periph; 148 struct cam_periph *cur_periph; 149 path_id_t path_id; 150 target_id_t target_id; 151 lun_id_t lun_id; 152 cam_status status; 153 u_int init_level; 154 155 init_level = 0; 156 /* 157 * Handle Hot-Plug scenarios. If there is already a peripheral 158 * of our type assigned to this path, we are likely waiting for 159 * final close on an old, invalidated, peripheral. If this is 160 * the case, queue up a deferred call to the peripheral's async 161 * handler. If it looks like a mistaken re-allocation, complain. 
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		xpt_unlock_buses();
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				mtx_assert(periph->sim->mtx, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

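/*
 * Take a reference on a peripheral so that it cannot be freed while the
 * caller is using it.  Balance with cam_periph_release().
 */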
cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if (periph->refcount != 0) {
		periph->refcount--;
	} else {
		xpt_print(periph->path, "%s: release %p when refcount is zero\n",
		    __func__, periph);
	}
	if (periph->refcount == 0
	    && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}

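/*
 * A minimal sketch of how a peripheral driver's open routine typically
 * pairs these primitives (cam_periph_lock/unlock are assumed to be the
 * usual wrappers around the SIM mutex):
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... perform open-time work ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */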
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at", &strval) ||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

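/*
 * Mark a peripheral as invalid: run the driver's oninvalidate callback
 * the first time the peripheral is invalidated, then free the peripheral
 * as soon as its last reference is dropped.
 */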
void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}

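/*
 * Free a peripheral whose last reference has been dropped: unlink it
 * from its driver's unit list, run the destructor, and deliver any
 * deferred async callback recorded while the old instance was going
 * away.  Called, and returns, with the bus list lock held; the lock is
 * dropped around the destructor and callback.
 */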
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	/* Some controllers may not be able to handle more data. */
	size_t maxmap = DFLTPHYS;

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_GDEV_ADVINFO:
		if (ccb->cgdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
		lengths[0] = ccb->cgdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict.  vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

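/*
 * A minimal sketch of the usual calling pattern when servicing a
 * user-supplied CCB; errfunc and ds are stand-ins for the driver's
 * error callback and devstat structure:
 *
 *	if ((error = cam_periph_mapmem(ccb, &mapinfo)) != 0)
 *		return (error);
 *	error = cam_periph_runccb(ccb, errfunc, cam_flags, sense_flags, ds);
 *	cam_periph_unmapmem(ccb, &mapinfo);
 */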
/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(mapinfo->num_bufs_used, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_GDEV_ADVINFO:
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

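/*
 * Request a CCB at the given priority via xpt_schedule() and sleep until
 * one has been allocated to this peripheral, then dequeue and return it.
 */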
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

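/*
 * Dispatch a CCB and sleep until it completes, letting the supplied
 * error routine decide whether to restart it.  If a devstat(9) structure
 * is supplied, SCSI and ATA I/O transactions are recorded against it.
 */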
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len,
					ccb->ataio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		}
	}

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{

	cam_freeze_devq_arg(path, 0, 0);
}

void
cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
	crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
	crs.release_flags = flags;
	crs.openings = arg;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define	saved_ccb_ptr ppriv_ptr0
#define	recovery_depth ppriv_field1
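/*
 * Completion handler for the REQUEST SENSE issued during error recovery:
 * fold the collected sense data back into the saved original CCB and
 * hand that CCB to its owner for normal error processing.
 */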
static void
camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	cam_status status;
	int frozen = 0;
	u_int sense_key;
	int depth = done_ccb->ccb_h.recovery_depth;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}
	status &= CAM_STATUS_MASK;
	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		sense_key = saved_ccb->csio.sense_data.flags;
		sense_key &= SSD_KEY;
		if (sense_key != SSD_KEY_NO_SENSE) {
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSNS_VALID;
		} else {
			saved_ccb->ccb_h.status &=
			    ~CAM_STATUS_MASK;
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSENSE_FAIL;
		}
		saved_ccb->csio.sense_resid = done_ccb->csio.resid;
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		break;
	}
	default:
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		done_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		done_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		break;
	}
	periph->flags &= ~CAM_PERIPH_SENSE_INPROG;
	/*
	 * If it is the end of recovery, drop freeze, taken due to
	 * CAM_DEV_QFREEZE flag, set on recovery request.
	 */
	if (depth == 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
	/*
	 * Copy frozen flag from recovery request if it is set there
	 * for some reason.
	 */
	if (frozen != 0)
		done_ccb->ccb_h.status |= CAM_DEV_QFRZN;
	(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

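/*
 * Completion handler for the recovery commands (START UNIT, TEST UNIT
 * READY, REQUEST SENSE) issued by the generic error handling code.
 * Retries the recovery command when that looks useful, and otherwise
 * restores and resubmits the original CCB so that final error processing
 * is performed by its owner.
 */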
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb, *save_ccb;
	cam_status status;
	int frozen = 0;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	switch (status & CAM_STATUS_MASK) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;

		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED,
				  done_ccb->ccb_h.path, NULL);
		goto final;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (status & CAM_AUTOSNS_VALID) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);
			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
			    CAM_PRIORITY_NORMAL);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;
				xpt_action(done_ccb);
			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */
				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;
				xpt_action(done_ccb);
				break;
			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				goto final;
			}
		} else {
			save_ccb = xpt_alloc_ccb_nowait();
			if (save_ccb == NULL)
				goto final;
			bcopy(done_ccb, save_ccb, sizeof(*save_ccb));
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&done_ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &save_ccb->csio.sense_data,
					   save_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			done_ccb->ccb_h.pinfo.priority--;
			done_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			done_ccb->ccb_h.saved_ccb_ptr = save_ccb;
			done_ccb->ccb_h.recovery_depth++;
			xpt_action(done_ccb);
		}
		break;
	default:
final:
		bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
		xpt_free_ccb(saved_ccb);
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
		xpt_action(done_ccb);
		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original CCB had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;
	/*
	 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on recovery
	 * request.
	 */
	cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/relsim_flags,
			 /*openings*/0,
			 /*timeout*/timeout,
			 /*getcount_only*/0);
	/* Drop freeze taken, if this recovery request got error. */
	if (frozen != 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

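/*
 * Freeze the device queue for long enough for the bus to settle after
 * the most recent reset reported by the SIM.
 */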
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}

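/*
 * Translate a SCSI status error into an errno value and a recovery
 * action, adjusting queue openings and release timing for QUEUE FULL
 * and BUSY conditions.
 */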
static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		if (bootverbose)
			xpt_print(ccb->ccb_h.path, "SCSI status error\n");
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

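/*
 * Examine (or fetch) the sense data for a failed command and map it to
 * an errno value, plus any recovery command (START UNIT, TEST UNIT
 * READY, or REQUEST SENSE) that must complete before the original CCB
 * is retried.
 */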
static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags &
	    (CAM_PERIPH_RECOVERY_INPROG | CAM_PERIPH_SENSE_INPROG)) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;
			if (SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
				xpt_free_ccb(orig_ccb);
				ccb->ccb_h.status |= CAM_DEV_QFRZN;
				*action_string = "Will not autostart a "
				    "sequential access device";
				err_action = SS_FAIL;
				error = EIO;
				break;
			}

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			*action_string = "Requesting SCSI sense data";
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &orig_ccb->csio.sense_data,
					   orig_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			ccb->ccb_h.recovery_depth = 0;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 &openings,
						 &relsim_flags,
						 &timeout,
						 &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_ATA_STATUS_ERROR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "ATA status error\n");
			cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected parity error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries exhausted";
		}
		break;
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
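
/*
 * A minimal sketch of how a peripheral driver typically hooks into this
 * handler from its own error callback (the xx names are hypothetical):
 *
 *	static int
 *	xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *
 *		return (cam_periph_error(ccb, cam_flags, sense_flags, NULL));
 *	}
 */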