/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_queue.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static	void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout,
						 const char **action_string);
static	int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout,
						const char **action_string);

static int nperiph_drivers;
static int initialized = 0;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
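
/*
 * Illustrative only (not part of this file): the three delay tunables
 * above are loader tunables, so they can be set from loader.conf, with
 * values in milliseconds.  The defaults shown are examples:
 *
 *	kern.cam.periph_selto_delay="1000"	# after a selection timeout
 *	kern.cam.periph_noresrc_delay="500"	# after a resource shortage
 *	kern.cam.periph_busy_delay="500"	# after a BUSY status
 */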

void
periphdriver_register(void *data)
{
	struct periph_driver *drv = (struct periph_driver *)data;
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = drv;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
	/* If driver marked as early or it is late now, initialize it. */
	if (((drv->flags & CAM_PERIPH_DRV_EARLY) != 0 && initialized > 0) ||
	    initialized > 1)
		(*drv->init)();
}

void
periphdriver_init(int level)
{
	int	i, early;

	initialized = max(initialized, level);
	for (i = 0; periph_drivers[i] != NULL; i++) {
		early = (periph_drivers[i]->flags & CAM_PERIPH_DRV_EARLY) ? 1 : 2;
		if (early == initialized)
			(*periph_drivers[i]->init)();
	}
}
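
/*
 * Registration is normally not called directly: a peripheral driver
 * declares itself with PERIPHDRIVER_DECLARE(), which arranges for
 * periphdriver_register() to run at boot or module load time.  A
 * minimal sketch for a hypothetical "foo" driver (the final 0 is the
 * generation count):
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(foo, foodriver);
 */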

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;
	periph->path = path;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		xpt_free_path(periph->path);
		free(periph, M_CAMPERIPH);
		xpt_unlock_buses();
		return (CAM_REQ_INVALID);
	}
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);
	if (cur_periph != NULL) {
		KASSERT(cur_periph->unit_number != periph->unit_number,
		    ("duplicate units on periph list"));
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	} else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}
	xpt_unlock_buses();

	init_level++;

	status = xpt_add_periph(periph);
	if (status != CAM_REQ_CMP)
		goto failure;

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_lock_buses();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_unlock_buses();
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}
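
/*
 * A minimal sketch of how a peripheral driver's async handler typically
 * calls cam_periph_alloc() when a new device is found (the "foo" names
 * are hypothetical; compare the "da" or "cd" drivers for real usage):
 *
 *	status = cam_periph_alloc(fooregister, foooninvalidate, foocleanup,
 *				  foostart, "foo", CAM_PERIPH_BIO,
 *				  cgd->ccb_h.path, fooasync, AC_FOUND_DEVICE,
 *				  cgd);
 */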

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				mtx_assert(periph->sim->mtx, MA_OWNED);
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if (periph->refcount != 0) {
		periph->refcount--;
	} else {
		xpt_print(periph->path, "%s: release %p when refcount is zero\n",
		    __func__, periph);
	}
	if (periph->refcount == 0
	    && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}
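
/*
 * Sketch of the usual acquire/hold pairing in a driver's open routine
 * (hypothetical error handling; the real pattern appears in, e.g., the
 * "da" driver's open and close paths):
 *
 *	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
 *		return (ENXIO);
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		cam_periph_release(periph);
 *		return (error);
 *	}
 *	... perform open-time setup ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */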

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}
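
/*
 * The wiring entries consulted above come from the kernel hints
 * mechanism.  An illustrative /boot/device.hints fragment that wires
 * unit da4 to a specific bus, target, and lun (values are examples
 * only):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */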

void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path,
			    CAM_PRIORITY_NORMAL);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to MAXPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];
	/* Some controllers may not be able to handle more data. */
	size_t maxmap = DFLTPHYS;

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_ATA_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->ataio.data_ptr;
		lengths[0] = ccb->ataio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	case XPT_SMP_IO:
		data_ptrs[0] = &ccb->smpio.smp_request;
		lengths[0] = ccb->smpio.smp_request_len;
		dirs[0] = CAM_DIR_OUT;
		data_ptrs[1] = &ccb->smpio.smp_response;
		lengths[1] = ccb->smpio.smp_response_len;
		dirs[1] = CAM_DIR_IN;
		numbufs = 2;
		break;
	case XPT_GDEV_ADVINFO:
		if (ccb->cgdai.bufsiz == 0)
			return (0);

		data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
		lengths[0] = ccb->cgdai.bufsiz;
		dirs[0] = CAM_DIR_IN;
		numbufs = 1;

		/*
		 * This request will not go to the hardware, no reason
		 * to be so strict. vmapbuf() is able to map up to MAXPHYS.
		 */
		maxmap = MAXPHYS;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > maxmap){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than %lu\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       (u_long)maxmap);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < MAXPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_ATA_IO:
		data_ptrs[0] = &ccb->ataio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	case XPT_SMP_IO:
		numbufs = min(mapinfo->num_bufs_used, 2);
		data_ptrs[0] = &ccb->smpio.smp_request;
		data_ptrs[1] = &ccb->smpio.smp_response;
		break;
	case XPT_GDEV_ADVINFO:
		numbufs = min(mapinfo->num_bufs_used, 1);
		data_ptrs[0] = (uint8_t **)&ccb->cgdai.buf;
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
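
/*
 * Sketch of the expected calling pattern, as used by passthrough-style
 * drivers (error handling elided, "fooerror" and "softc" hypothetical):
 *
 *	struct cam_periph_map_info mapinfo;
 *
 *	bzero(&mapinfo, sizeof(mapinfo));
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, fooerror, CAM_RETRY_SELTO,
 *		    SF_RETRY_UA, softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */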

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, u_long cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb	*ccb;
	int		error;
	int		found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
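
/*
 * A driver's ioctl entry point typically falls back to this helper for
 * generic CAM requests such as CAMGETPASSTHRU.  A minimal sketch with
 * hypothetical "foo" names:
 *
 *	static int
 *	fooioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
 *	    struct thread *td)
 *	{
 *		...
 *		default:
 *			error = cam_periph_ioctl(periph, cmd, addr, fooerror);
 *			break;
 *		...
 *	}
 */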

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO ||
	    ccb->ccb_h.func_code == XPT_ATA_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);
		ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	if (ds != NULL) {
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		} else if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			devstat_end_transaction(ds,
					ccb->ataio.dxfer_len,
					ccb->ataio.tag_action & 0x3,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);
		}
	}

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{

	cam_freeze_devq_arg(path, 0, 0);
}

void
cam_freeze_devq_arg(struct cam_path *path, uint32_t flags, uint32_t arg)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NONE);
	crs.ccb_h.func_code = XPT_FREEZE_QUEUE;
	crs.release_flags = flags;
	crs.openings = arg;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t arg,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path, CAM_PRIORITY_NORMAL);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = arg;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
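
/*
 * Freeze and release are paired: each cam_freeze_devq() must eventually
 * be matched by a cam_release_devq() on the same path.  An illustrative
 * quiesce around a reconfiguration step might look like:
 *
 *	cam_freeze_devq(periph->path);
 *	... change device settings with the queue quiesced ...
 *	cam_release_devq(periph->path, 0, 0, 0, FALSE);
 */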

#define saved_ccb_ptr ppriv_ptr0
#define recovery_depth ppriv_field1
static void
camperiphsensedone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;
	cam_status	status;
	int		frozen = 0;
	u_int		sense_key;
	int		depth = done_ccb->ccb_h.recovery_depth;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}
	status &= CAM_STATUS_MASK;
	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		sense_key = saved_ccb->csio.sense_data.flags;
		sense_key &= SSD_KEY;
		if (sense_key != SSD_KEY_NO_SENSE) {
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSNS_VALID;
		} else {
			saved_ccb->ccb_h.status &=
			    ~CAM_STATUS_MASK;
			saved_ccb->ccb_h.status |=
			    CAM_AUTOSENSE_FAIL;
		}
		saved_ccb->csio.sense_resid = done_ccb->csio.resid;
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		break;
	}
	default:
		bcopy(saved_ccb, done_ccb, sizeof(union ccb));
		xpt_free_ccb(saved_ccb);
		done_ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		done_ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
		break;
	}
	periph->flags &= ~CAM_PERIPH_SENSE_INPROG;
	/*
	 * If it is the end of recovery, drop freeze, taken due to
	 * CAM_DEV_QFREEZE flag, set on recovery request.
	 */
	if (depth == 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
	/*
	 * Copy frozen flag from recovery request if it is set there
	 * for some reason.
	 */
	if (frozen != 0)
		done_ccb->ccb_h.status |= CAM_DEV_QFRZN;
	(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb, *save_ccb;
	cam_status	status;
	int		frozen = 0;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;

	status = done_ccb->ccb_h.status;
	if (status & CAM_DEV_QFRZN) {
		frozen = 1;
		/*
		 * Clear freeze flag now for case of retry,
		 * freeze will be dropped later.
		 */
		done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
	}

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	switch (status & CAM_STATUS_MASK) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 */
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;

		if (scsi_cmd->opcode == START_STOP_UNIT)
			xpt_async(AC_INQ_CHANGED,
				  done_ccb->ccb_h.path, NULL);
		goto final;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (status & CAM_AUTOSNS_VALID) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);
			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
			    CAM_PRIORITY_NORMAL);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);
			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */
			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;
				xpt_action(done_ccb);
			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */
				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;
				xpt_action(done_ccb);
				break;
			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				goto final;
			}
		} else {
			save_ccb = xpt_alloc_ccb_nowait();
			if (save_ccb == NULL)
				goto final;
			bcopy(done_ccb, save_ccb, sizeof(*save_ccb));
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&done_ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &save_ccb->csio.sense_data,
					   save_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			done_ccb->ccb_h.pinfo.priority--;
			done_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			done_ccb->ccb_h.saved_ccb_ptr = save_ccb;
			done_ccb->ccb_h.recovery_depth++;
			xpt_action(done_ccb);
		}
		break;
	default:
final:
		bcopy(saved_ccb, done_ccb, sizeof(*done_ccb));
		xpt_free_ccb(saved_ccb);
		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;
		xpt_action(done_ccb);
		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;
	/*
	 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on recovery
	 * request.
	 */
	cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/relsim_flags,
			 /*openings*/0,
			 /*timeout*/timeout,
			 /*getcount_only*/0);
	/* Drop freeze taken, if this recovery request got error. */
	if (frozen != 0) {
		cam_release_devq(done_ccb->ccb_h.path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);
	}
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}
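
/*
 * Illustrative use: a driver's open path can pause I/O until a recently
 * reset bus has settled, e.g. (where "settle_ms" is a hypothetical
 * millisecond settle time supplied by the caller, not a symbol defined
 * here):
 *
 *	cam_periph_bus_settle(periph, settle_ms);
 */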

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout, const char **action_string)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		if (bootverbose)
			xpt_print(ccb->ccb_h.path, "SCSI status error\n");
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						openings,
						relsim_flags,
						timeout,
						action_string);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      CAM_PRIORITY_NORMAL);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout, const char **action_string)
{
	struct cam_periph *periph;
	union ccb *orig_ccb = ccb;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags &
	    (CAM_PERIPH_RECOVERY_INPROG | CAM_PERIPH_SENSE_INPROG)) {
		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				*action_string = "Retries exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			orig_ccb = xpt_alloc_ccb_nowait();
			if (orig_ccb == NULL) {
				*action_string = "Can't allocate recovery CCB";
				goto sense_error_done;
			}
			/*
			 * Clear freeze flag for original request here, as
			 * this freeze will be dropped as part of ERESTART.
			 */
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
			bcopy(ccb, orig_ccb, sizeof(*orig_ccb));
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			*action_string = "No recovery action needed";
			error = 0;
			break;
		case SS_RETRY:
			*action_string = "Retrying command (per sense data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			*action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;
			if (SID_TYPE(&cgd.inq_data) == T_SEQUENTIAL) {
				xpt_free_ccb(orig_ccb);
				ccb->ccb_h.status |= CAM_DEV_QFRZN;
				*action_string = "Will not autostart a "
				    "sequential access device";
				err_action = SS_FAIL;
				error = EIO;
				break;
			}

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			*action_string = "Attempting to start unit";
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				*action_string = "Polling device for readiness";
				retries = 120;
			} else {
				*action_string = "Testing device for readiness";
				retries = 1;
			}
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			*action_string = "Requesting SCSI sense data";
			periph->flags |= CAM_PERIPH_SENSE_INPROG;
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphsensedone,
					   &orig_ccb->csio.sense_data,
					   orig_ccb->csio.sense_len,
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority, so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority--;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = orig_ccb;
			ccb->ccb_h.recovery_depth = 0;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			cam_error_print(orig_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status  status;
	int	    frozen;
	int	    error, printed = 0;
	int	    openings;
	u_int32_t   relsim_flags;
	u_int32_t   timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 &openings,
						 &relsim_flags,
						 &timeout,
						 &action_string);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_ATA_STATUS_ERROR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "ATA status error\n");
			cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected parity error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries exhausted";
		}
		break;
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {
		if (error != ERESTART) {
			if (action_string == NULL)
				action_string = "Unretryable error";
			xpt_print(ccb->ccb_h.path, "Error %d, %s\n",
			    error, action_string);
		} else if (action_string != NULL)
			xpt_print(ccb->ccb_h.path, "%s\n", action_string);
		else
			xpt_print(ccb->ccb_h.path, "Retrying command\n");
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;
		if (error == ERESTART)
			xpt_action(ccb);
		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	return (error);
}
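
/*
 * Sketch of how a peripheral driver's error callback usually delegates
 * to cam_periph_error() (hypothetical "foo" names; the "da" driver uses
 * this same pattern):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct foo_softc *softc;
 *		struct cam_periph *periph;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct foo_softc *)periph->softc;
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *			&softc->saved_ccb));
 *	}
 */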