1 /*- 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/types.h> 36 #include <sys/malloc.h> 37 #include <sys/kernel.h> 38 #include <sys/linker_set.h> 39 #include <sys/bio.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/buf.h> 43 #include <sys/proc.h> 44 #include <sys/devicestat.h> 45 #include <sys/bus.h> 46 #include <vm/vm.h> 47 #include <vm/vm_extern.h> 48 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include <cam/cam_xpt_periph.h> 52 #include <cam/cam_periph.h> 53 #include <cam/cam_debug.h> 54 #include <cam/cam_sim.h> 55 56 #include <cam/scsi/scsi_all.h> 57 #include <cam/scsi/scsi_message.h> 58 #include <cam/scsi/scsi_pass.h> 59 60 static u_int camperiphnextunit(struct periph_driver *p_drv, 61 u_int newunit, int wired, 62 path_id_t pathid, target_id_t target, 63 lun_id_t lun); 64 static u_int camperiphunit(struct periph_driver *p_drv, 65 path_id_t pathid, target_id_t target, 66 lun_id_t lun); 67 static void camperiphdone(struct cam_periph *periph, 68 union ccb *done_ccb); 69 static void camperiphfree(struct cam_periph *periph); 70 static int camperiphscsistatuserror(union ccb *ccb, 71 cam_flags camflags, 72 u_int32_t sense_flags, 73 union ccb *save_ccb, 74 int *openings, 75 u_int32_t *relsim_flags, 76 u_int32_t *timeout); 77 static int camperiphscsisenseerror(union ccb *ccb, 78 cam_flags camflags, 79 u_int32_t sense_flags, 80 union ccb *save_ccb, 81 int *openings, 82 u_int32_t *relsim_flags, 83 u_int32_t *timeout); 84 85 static int nperiph_drivers; 86 struct periph_driver **periph_drivers; 87 88 MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers"); 89 90 static int periph_selto_delay = 1000; 91 TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay); 92 static int periph_noresrc_delay = 500; 93 TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay); 94 static int periph_busy_delay = 500; 95 
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay); 96 97 98 void 99 periphdriver_register(void *data) 100 { 101 struct periph_driver **newdrivers, **old; 102 int ndrivers; 103 104 ndrivers = nperiph_drivers + 2; 105 newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK); 106 if (periph_drivers) 107 bcopy(periph_drivers, newdrivers, 108 sizeof(*newdrivers) * nperiph_drivers); 109 newdrivers[nperiph_drivers] = (struct periph_driver *)data; 110 newdrivers[nperiph_drivers + 1] = NULL; 111 old = periph_drivers; 112 periph_drivers = newdrivers; 113 if (old) 114 free(old, M_TEMP); 115 nperiph_drivers++; 116 } 117 118 cam_status 119 cam_periph_alloc(periph_ctor_t *periph_ctor, 120 periph_oninv_t *periph_oninvalidate, 121 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 122 char *name, cam_periph_type type, struct cam_path *path, 123 ac_callback_t *ac_callback, ac_code code, void *arg) 124 { 125 struct periph_driver **p_drv; 126 struct cam_sim *sim; 127 struct cam_periph *periph; 128 struct cam_periph *cur_periph; 129 path_id_t path_id; 130 target_id_t target_id; 131 lun_id_t lun_id; 132 cam_status status; 133 u_int init_level; 134 int s; 135 136 init_level = 0; 137 /* 138 * Handle Hot-Plug scenarios. If there is already a peripheral 139 * of our type assigned to this path, we are likely waiting for 140 * final close on an old, invalidated, peripheral. If this is 141 * the case, queue up a deferred call to the peripheral's async 142 * handler. If it looks like a mistaken re-allocation, complain. 
143 */ 144 if ((periph = cam_periph_find(path, name)) != NULL) { 145 146 if ((periph->flags & CAM_PERIPH_INVALID) != 0 147 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { 148 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; 149 periph->deferred_callback = ac_callback; 150 periph->deferred_ac = code; 151 return (CAM_REQ_INPROG); 152 } else { 153 printf("cam_periph_alloc: attempt to re-allocate " 154 "valid device %s%d rejected\n", 155 periph->periph_name, periph->unit_number); 156 } 157 return (CAM_REQ_INVALID); 158 } 159 160 periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH, 161 M_NOWAIT); 162 163 if (periph == NULL) 164 return (CAM_RESRC_UNAVAIL); 165 166 init_level++; 167 168 xpt_lock_buses(); 169 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 170 if (strcmp((*p_drv)->driver_name, name) == 0) 171 break; 172 } 173 xpt_unlock_buses(); 174 175 sim = xpt_path_sim(path); 176 path_id = xpt_path_path_id(path); 177 target_id = xpt_path_target_id(path); 178 lun_id = xpt_path_lun_id(path); 179 bzero(periph, sizeof(*periph)); 180 cam_init_pinfo(&periph->pinfo); 181 periph->periph_start = periph_start; 182 periph->periph_dtor = periph_dtor; 183 periph->periph_oninval = periph_oninvalidate; 184 periph->type = type; 185 periph->periph_name = name; 186 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); 187 periph->immediate_priority = CAM_PRIORITY_NONE; 188 periph->refcount = 0; 189 periph->sim = sim; 190 SLIST_INIT(&periph->ccb_list); 191 status = xpt_create_path(&path, periph, path_id, target_id, lun_id); 192 if (status != CAM_REQ_CMP) 193 goto failure; 194 195 periph->path = path; 196 init_level++; 197 198 status = xpt_add_periph(periph); 199 200 if (status != CAM_REQ_CMP) 201 goto failure; 202 203 s = splsoftcam(); 204 cur_periph = TAILQ_FIRST(&(*p_drv)->units); 205 while (cur_periph != NULL 206 && cur_periph->unit_number < periph->unit_number) 207 cur_periph = TAILQ_NEXT(cur_periph, unit_links); 208 209 if (cur_periph != NULL) 
210 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); 211 else { 212 TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); 213 (*p_drv)->generation++; 214 } 215 216 splx(s); 217 218 init_level++; 219 220 status = periph_ctor(periph, arg); 221 222 if (status == CAM_REQ_CMP) 223 init_level++; 224 225 failure: 226 switch (init_level) { 227 case 4: 228 /* Initialized successfully */ 229 break; 230 case 3: 231 s = splsoftcam(); 232 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 233 splx(s); 234 xpt_remove_periph(periph); 235 /* FALLTHROUGH */ 236 case 2: 237 xpt_free_path(periph->path); 238 /* FALLTHROUGH */ 239 case 1: 240 free(periph, M_CAMPERIPH); 241 /* FALLTHROUGH */ 242 case 0: 243 /* No cleanup to perform. */ 244 break; 245 default: 246 panic("cam_periph_alloc: Unkown init level"); 247 } 248 return(status); 249 } 250 251 /* 252 * Find a peripheral structure with the specified path, target, lun, 253 * and (optionally) type. If the name is NULL, this function will return 254 * the first peripheral driver that matches the specified path. 
255 */ 256 struct cam_periph * 257 cam_periph_find(struct cam_path *path, char *name) 258 { 259 struct periph_driver **p_drv; 260 struct cam_periph *periph; 261 int s; 262 263 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) { 264 265 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) 266 continue; 267 268 s = splsoftcam(); 269 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) { 270 if (xpt_path_comp(periph->path, path) == 0) { 271 splx(s); 272 return(periph); 273 } 274 } 275 splx(s); 276 if (name != NULL) 277 return(NULL); 278 } 279 return(NULL); 280 } 281 282 cam_status 283 cam_periph_acquire(struct cam_periph *periph) 284 { 285 286 if (periph == NULL) 287 return(CAM_REQ_CMP_ERR); 288 289 xpt_lock_buses(); 290 periph->refcount++; 291 xpt_unlock_buses(); 292 293 return(CAM_REQ_CMP); 294 } 295 296 void 297 cam_periph_release(struct cam_periph *periph) 298 { 299 300 if (periph == NULL) 301 return; 302 303 xpt_lock_buses(); 304 if ((--periph->refcount == 0) 305 && (periph->flags & CAM_PERIPH_INVALID)) { 306 camperiphfree(periph); 307 } 308 xpt_unlock_buses(); 309 310 } 311 312 int 313 cam_periph_hold(struct cam_periph *periph, int priority) 314 { 315 struct mtx *mtx; 316 int error; 317 318 mtx_assert(periph->sim->mtx, MA_OWNED); 319 320 /* 321 * Increment the reference count on the peripheral 322 * while we wait for our lock attempt to succeed 323 * to ensure the peripheral doesn't disappear out 324 * from user us while we sleep. 
325 */ 326 327 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 328 return (ENXIO); 329 330 mtx = periph->sim->mtx; 331 if (mtx == &Giant) 332 mtx = NULL; 333 334 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { 335 periph->flags |= CAM_PERIPH_LOCK_WANTED; 336 if ((error = msleep(periph, mtx, priority, "caplck", 0)) != 0) { 337 cam_periph_release(periph); 338 return (error); 339 } 340 } 341 342 periph->flags |= CAM_PERIPH_LOCKED; 343 return (0); 344 } 345 346 void 347 cam_periph_unhold(struct cam_periph *periph) 348 { 349 350 mtx_assert(periph->sim->mtx, MA_OWNED); 351 352 periph->flags &= ~CAM_PERIPH_LOCKED; 353 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { 354 periph->flags &= ~CAM_PERIPH_LOCK_WANTED; 355 wakeup(periph); 356 } 357 358 cam_periph_release(periph); 359 } 360 361 /* 362 * Look for the next unit number that is not currently in use for this 363 * peripheral type starting at "newunit". Also exclude unit numbers that 364 * are reserved by for future "hardwiring" unless we already know that this 365 * is a potential wired device. Only assume that the device is "wired" the 366 * first time through the loop since after that we'll be looking at unit 367 * numbers that did not match a wiring entry. 
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	s;
	int	i, val, dunit, r;
	const char *dname, *strval;

	s = splsoftcam();
	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		/* Is "newunit" already taken by an existing periph? */
		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				/*
				 * Two wiring entries resolved to the same
				 * unit; warn and stop treating this device
				 * as wired so it gets the next free unit.
				 */
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		/* A wired device keeps its configured unit unconditionally. */
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			/* newunit is reserved by a hint entry; try the next. */
			if (newunit == dunit)
				break;
		}
		/* No hint reserves newunit -- it is free to use. */
		if (r != 0)
			break;
	}
	splx(s);
	return (newunit);
}

/*
 * Pick a unit number for a new periph at pathid/target/lun.  If a device
 * hint ("at scbusN", "target N", "lun N") wires this location to a unit,
 * start from that unit; otherwise start from 0 and take the first free
 * one via camperiphnextunit().
 */
static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	/*
	 * Scan all hint entries for this driver; "wired" counts how many
	 * of the location keys (at/target/lun) matched for the current
	 * entry, and is reset to 0 for each new entry.
	 */
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

/*
 * Mark a peripheral invalid, running its oninvalidate() hook the first
 * time through, and free it immediately if no references remain.
 */
void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.  The oninvalidate() routine is always called at
	 * splsoftcam().
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_invalidate_periph: refcount < 0!!\n");
	xpt_unlock_buses();
}

/*
 * Tear down and free an invalidated peripheral with no remaining
 * references.  Called with the xpt bus lock held; the lock is dropped
 * around the destructor and any deferred async callback (which may
 * issue CCBs) and re-acquired before returning.
 */
static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existant periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		/*
		 * A new device showed up on this path while the old periph
		 * was draining (see cam_periph_alloc).  Build the argument
		 * the deferred async callback expects for its event code.
		 */
		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Wait interruptibly for an exclusive lock.
 */
void
cam_periph_lock(struct cam_periph *periph)
{

	/* Simply takes the SIM mutex for this periph's controller. */
	mtx_lock(periph->sim->mtx);
}

/*
 * Unlock and wake up any waiters.
 */
void
cam_periph_unlock(struct cam_periph *periph)
{

	/* Releases the SIM mutex taken by cam_periph_lock(). */
	mtx_unlock(periph->sim->mtx);
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory per buffer
 * (that is the limit actually enforced below).  Since we use buffers to
 * map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	/*
	 * Work out, per CCB type, which embedded user pointers need to be
	 * mapped, how long each region is, and the transfer direction.
	 */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		/* Nothing to map for a transferless command. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			/* Unwind every buffer mapped so far. */
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}


	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	/* Recompute which CCB fields were remapped, mirroring mapmem. */
	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

/*
 * Get a CCB off the periph's immediate-CCB list, scheduling the periph
 * and sleeping until one of at least the requested priority arrives.
 * Must be called with the SIM lock held.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	struct mtx *mtx;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		/* xpt_schedule() may have delivered a CCB synchronously. */
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		/* msleep() wants NULL rather than Giant. */
		if (periph->sim->mtx == &Giant)
			mtx = NULL;
		else
			mtx = periph->sim->mtx;
		msleep(&periph->ccb_list, mtx, PRIBIO, "cgticb", 0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

/*
 * Sleep until the given CCB has completed (i.e. it has been dequeued
 * and its status is no longer CAM_REQ_INPROG).  The wakeup is done on
 * &ccb->ccb_h.cbfcnp by the completion path.
 */
void
cam_periph_ccbwait(union ccb *ccb)
{
	struct mtx *mtx;
	struct cam_sim *sim;
	int s;

	s = splsoftcam();
	sim = xpt_path_sim(ccb->ccb_h.path);
	/* msleep() wants NULL rather than Giant. */
	if (sim->mtx == &Giant)
		mtx = NULL;
	else
		mtx = sim->mtx;
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		msleep(&ccb->ccb_h.cbfcnp, mtx, PRIBIO, "cbwait", 0);

	splx(s);
}

/*
 * Common ioctl support for CAM peripheral drivers.  Currently handles
 * only CAMGETPASSTHRU: find the "pass" device on the same path and copy
 * the resulting XPT_GDEVLIST CCB back to the caller via "addr".
 */
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb, 
				      cam_flags camflags, 
				      u_int32_t sense_flags))
{
	union ccb	     *ccb;
	int		     error;
	int		     found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name, 
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				/* No pass device; report an empty name. */
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */	
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

/*
 * Submit a CCB and wait for it to complete, invoking error_routine (if
 * given) and retrying while it returns ERESTART.  Releases any device
 * queue freeze left on the path and records devstat bookkeeping for
 * XPT_SCSI_IO CCBs when "ds" is supplied.  Must be called with the SIM
 * lock held.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;
 
	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);
 
	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);
          
	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE : 
					DEVSTAT_READ, NULL, NULL);

	return(error);
}

/*
 * Freeze the device queue for "path" by issuing a no-op CCB flagged
 * with CAM_DEV_QFREEZE.
 */
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

/*
 * Release one freeze count on the device queue for "path", optionally
 * adjusting openings or arming a relsim timeout; with getcount_only the
 * queue is left frozen and only the current freeze count is returned.
 */
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	/* CAM_DEV_QFREEZE offsets the release so the count is unchanged. */
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for error-recovery CCBs (e.g. START STOP UNIT or
 * REQUEST SENSE issued on behalf of a failed command).  The original
 * CCB was stashed in saved_ccb_ptr; once recovery finishes (or fails),
 * it is copied back into done_ccb and re-dispatched, and any queue
 * freeze taken for recovery is released.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	int		frozen;
	int		sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;
	u_int32_t	qfrozen_cnt;
	int		xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense  = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/* 
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					      /*relsim_flags*/0,
					      /*openings*/0,
					      /*timeout*/0,
					      /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

		 	if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		/* Restore the original CCB and send it on its way. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;	
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code, 
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
	 		 * If the error is "invalid field in CDB", 
			 * and the load/eject flag is set, turn the 
			 * flag off and try again.  This is just in 
			 * case the drive in question barfs on the 
			 * load eject flag.  The CAM code should set 
			 * the load/eject flag by default for 
			 * removable media.
			 */

			/* XXX KDM 
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got 
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/* 
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/* 
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		/* Recovery command failed outright; give back the original. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				      /*relsim_flags*/relsim_flags,
				      /*openings*/0,
				      /*timeout*/timeout,
				      /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
1191 */ 1192 void 1193 cam_periph_async(struct cam_periph *periph, u_int32_t code, 1194 struct cam_path *path, void *arg) 1195 { 1196 switch (code) { 1197 case AC_LOST_DEVICE: 1198 cam_periph_invalidate(periph); 1199 break; 1200 case AC_SENT_BDR: 1201 case AC_BUS_RESET: 1202 { 1203 cam_periph_bus_settle(periph, scsi_delay); 1204 break; 1205 } 1206 default: 1207 break; 1208 } 1209 } 1210 1211 void 1212 cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle) 1213 { 1214 struct ccb_getdevstats cgds; 1215 1216 xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1); 1217 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1218 xpt_action((union ccb *)&cgds); 1219 cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle); 1220 } 1221 1222 void 1223 cam_periph_freeze_after_event(struct cam_periph *periph, 1224 struct timeval* event_time, u_int duration_ms) 1225 { 1226 struct timeval delta; 1227 struct timeval duration_tv; 1228 int s; 1229 1230 s = splclock(); 1231 microtime(&delta); 1232 splx(s); 1233 timevalsub(&delta, event_time); 1234 duration_tv.tv_sec = duration_ms / 1000; 1235 duration_tv.tv_usec = (duration_ms % 1000) * 1000; 1236 if (timevalcmp(&delta, &duration_tv, <)) { 1237 timevalsub(&duration_tv, &delta); 1238 1239 duration_ms = duration_tv.tv_sec * 1000; 1240 duration_ms += duration_tv.tv_usec / 1000; 1241 cam_freeze_devq(periph->path); 1242 cam_release_devq(periph->path, 1243 RELSIM_RELEASE_AFTER_TIMEOUT, 1244 /*reduction*/0, 1245 /*timeout*/duration_ms, 1246 /*getcount_only*/0); 1247 } 1248 1249 } 1250 1251 static int 1252 camperiphscsistatuserror(union ccb *ccb, cam_flags camflags, 1253 u_int32_t sense_flags, union ccb *save_ccb, 1254 int *openings, u_int32_t *relsim_flags, 1255 u_int32_t *timeout) 1256 { 1257 int error; 1258 1259 switch (ccb->csio.scsi_status) { 1260 case SCSI_STATUS_OK: 1261 case SCSI_STATUS_COND_MET: 1262 case SCSI_STATUS_INTERMED: 1263 case SCSI_STATUS_INTERMED_COND_MET: 1264 error = 0; 1265 break; 1266 case 
SCSI_STATUS_CMD_TERMINATED: 1267 case SCSI_STATUS_CHECK_COND: 1268 error = camperiphscsisenseerror(ccb, 1269 camflags, 1270 sense_flags, 1271 save_ccb, 1272 openings, 1273 relsim_flags, 1274 timeout); 1275 break; 1276 case SCSI_STATUS_QUEUE_FULL: 1277 { 1278 /* no decrement */ 1279 struct ccb_getdevstats cgds; 1280 1281 /* 1282 * First off, find out what the current 1283 * transaction counts are. 1284 */ 1285 xpt_setup_ccb(&cgds.ccb_h, 1286 ccb->ccb_h.path, 1287 /*priority*/1); 1288 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1289 xpt_action((union ccb *)&cgds); 1290 1291 /* 1292 * If we were the only transaction active, treat 1293 * the QUEUE FULL as if it were a BUSY condition. 1294 */ 1295 if (cgds.dev_active != 0) { 1296 int total_openings; 1297 1298 /* 1299 * Reduce the number of openings to 1300 * be 1 less than the amount it took 1301 * to get a queue full bounded by the 1302 * minimum allowed tag count for this 1303 * device. 1304 */ 1305 total_openings = cgds.dev_active + cgds.dev_openings; 1306 *openings = cgds.dev_active; 1307 if (*openings < cgds.mintags) 1308 *openings = cgds.mintags; 1309 if (*openings < total_openings) 1310 *relsim_flags = RELSIM_ADJUST_OPENINGS; 1311 else { 1312 /* 1313 * Some devices report queue full for 1314 * temporary resource shortages. For 1315 * this reason, we allow a minimum 1316 * tag count to be entered via a 1317 * quirk entry to prevent the queue 1318 * count on these devices from falling 1319 * to a pessimisticly low value. We 1320 * still wait for the next successful 1321 * completion, however, before queueing 1322 * more transactions to the device. 1323 */ 1324 *relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT; 1325 } 1326 *timeout = 0; 1327 error = ERESTART; 1328 if (bootverbose) { 1329 xpt_print(ccb->ccb_h.path, "Queue Full\n"); 1330 } 1331 break; 1332 } 1333 /* FALLTHROUGH */ 1334 } 1335 case SCSI_STATUS_BUSY: 1336 /* 1337 * Restart the queue after either another 1338 * command completes or a 1 second timeout. 
1339 */ 1340 if (bootverbose) { 1341 xpt_print(ccb->ccb_h.path, "Device Busy\n"); 1342 } 1343 if (ccb->ccb_h.retry_count > 0) { 1344 ccb->ccb_h.retry_count--; 1345 error = ERESTART; 1346 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1347 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1348 *timeout = 1000; 1349 } else { 1350 error = EIO; 1351 } 1352 break; 1353 case SCSI_STATUS_RESERV_CONFLICT: 1354 xpt_print(ccb->ccb_h.path, "Reservation Conflict\n"); 1355 error = EIO; 1356 break; 1357 default: 1358 xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n", 1359 ccb->csio.scsi_status); 1360 error = EIO; 1361 break; 1362 } 1363 return (error); 1364 } 1365 1366 static int 1367 camperiphscsisenseerror(union ccb *ccb, cam_flags camflags, 1368 u_int32_t sense_flags, union ccb *save_ccb, 1369 int *openings, u_int32_t *relsim_flags, 1370 u_int32_t *timeout) 1371 { 1372 struct cam_periph *periph; 1373 int error; 1374 1375 periph = xpt_path_periph(ccb->ccb_h.path); 1376 if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) { 1377 1378 /* 1379 * If error recovery is already in progress, don't attempt 1380 * to process this error, but requeue it unconditionally 1381 * and attempt to process it once error recovery has 1382 * completed. This failed command is probably related to 1383 * the error that caused the currently active error recovery 1384 * action so our current recovery efforts should also 1385 * address this command. Be aware that the error recovery 1386 * code assumes that only one recovery action is in progress 1387 * on a particular peripheral instance at any given time 1388 * (e.g. only one saved CCB for error recovery) so it is 1389 * imperitive that we don't violate this assumption. 
1390 */ 1391 error = ERESTART; 1392 } else { 1393 scsi_sense_action err_action; 1394 struct ccb_getdev cgd; 1395 const char *action_string; 1396 union ccb* print_ccb; 1397 1398 /* A description of the error recovery action performed */ 1399 action_string = NULL; 1400 1401 /* 1402 * The location of the orignal ccb 1403 * for sense printing purposes. 1404 */ 1405 print_ccb = ccb; 1406 1407 /* 1408 * Grab the inquiry data for this device. 1409 */ 1410 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1); 1411 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1412 xpt_action((union ccb *)&cgd); 1413 1414 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) 1415 err_action = scsi_error_action(&ccb->csio, 1416 &cgd.inq_data, 1417 sense_flags); 1418 else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) 1419 err_action = SS_REQSENSE; 1420 else 1421 err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO; 1422 1423 error = err_action & SS_ERRMASK; 1424 1425 /* 1426 * If the recovery action will consume a retry, 1427 * make sure we actually have retries available. 1428 */ 1429 if ((err_action & SSQ_DECREMENT_COUNT) != 0) { 1430 if (ccb->ccb_h.retry_count > 0) 1431 ccb->ccb_h.retry_count--; 1432 else { 1433 action_string = "Retries Exhausted"; 1434 goto sense_error_done; 1435 } 1436 } 1437 1438 if ((err_action & SS_MASK) >= SS_START) { 1439 /* 1440 * Do common portions of commands that 1441 * use recovery CCBs. 
1442 */ 1443 if (save_ccb == NULL) { 1444 action_string = "No recovery CCB supplied"; 1445 goto sense_error_done; 1446 } 1447 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1448 print_ccb = save_ccb; 1449 periph->flags |= CAM_PERIPH_RECOVERY_INPROG; 1450 } 1451 1452 switch (err_action & SS_MASK) { 1453 case SS_NOP: 1454 action_string = "No Recovery Action Needed"; 1455 error = 0; 1456 break; 1457 case SS_RETRY: 1458 action_string = "Retrying Command (per Sense Data)"; 1459 error = ERESTART; 1460 break; 1461 case SS_FAIL: 1462 action_string = "Unretryable error"; 1463 break; 1464 case SS_START: 1465 { 1466 int le; 1467 1468 /* 1469 * Send a start unit command to the device, and 1470 * then retry the command. 1471 */ 1472 action_string = "Attempting to Start Unit"; 1473 1474 /* 1475 * Check for removable media and set 1476 * load/eject flag appropriately. 1477 */ 1478 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1479 le = TRUE; 1480 else 1481 le = FALSE; 1482 1483 scsi_start_stop(&ccb->csio, 1484 /*retries*/1, 1485 camperiphdone, 1486 MSG_SIMPLE_Q_TAG, 1487 /*start*/TRUE, 1488 /*load/eject*/le, 1489 /*immediate*/FALSE, 1490 SSD_FULL_SIZE, 1491 /*timeout*/50000); 1492 break; 1493 } 1494 case SS_TUR: 1495 { 1496 /* 1497 * Send a Test Unit Ready to the device. 1498 * If the 'many' flag is set, we send 120 1499 * test unit ready commands, one every half 1500 * second. Otherwise, we just send one TUR. 1501 * We only want to do this if the retry 1502 * count has not been exhausted. 1503 */ 1504 int retries; 1505 1506 if ((err_action & SSQ_MANY) != 0) { 1507 action_string = "Polling device for readiness"; 1508 retries = 120; 1509 } else { 1510 action_string = "Testing device for readiness"; 1511 retries = 1; 1512 } 1513 scsi_test_unit_ready(&ccb->csio, 1514 retries, 1515 camperiphdone, 1516 MSG_SIMPLE_Q_TAG, 1517 SSD_FULL_SIZE, 1518 /*timeout*/5000); 1519 1520 /* 1521 * Accomplish our 500ms delay by deferring 1522 * the release of our device queue appropriately. 
1523 */ 1524 *relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1525 *timeout = 500; 1526 break; 1527 } 1528 case SS_REQSENSE: 1529 { 1530 /* 1531 * Send a Request Sense to the device. We 1532 * assume that we are in a contingent allegiance 1533 * condition so we do not tag this request. 1534 */ 1535 scsi_request_sense(&ccb->csio, /*retries*/1, 1536 camperiphdone, 1537 &save_ccb->csio.sense_data, 1538 sizeof(save_ccb->csio.sense_data), 1539 CAM_TAG_ACTION_NONE, 1540 /*sense_len*/SSD_FULL_SIZE, 1541 /*timeout*/5000); 1542 break; 1543 } 1544 default: 1545 panic("Unhandled error action %x", err_action); 1546 } 1547 1548 if ((err_action & SS_MASK) >= SS_START) { 1549 /* 1550 * Drop the priority to 0 so that the recovery 1551 * CCB is the first to execute. Freeze the queue 1552 * after this command is sent so that we can 1553 * restore the old csio and have it queued in 1554 * the proper order before we release normal 1555 * transactions to the device. 1556 */ 1557 ccb->ccb_h.pinfo.priority = 0; 1558 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1559 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1560 error = ERESTART; 1561 } 1562 1563 sense_error_done: 1564 if ((err_action & SSQ_PRINT_SENSE) != 0 1565 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) { 1566 cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL); 1567 xpt_print_path(ccb->ccb_h.path); 1568 if (bootverbose) 1569 scsi_sense_print(&print_ccb->csio); 1570 printf("%s\n", action_string); 1571 } 1572 } 1573 return (error); 1574 } 1575 1576 /* 1577 * Generic error handler. Peripheral drivers usually filter 1578 * out the errors that they handle in a unique mannor, then 1579 * call this function. 
1580 */ 1581 int 1582 cam_periph_error(union ccb *ccb, cam_flags camflags, 1583 u_int32_t sense_flags, union ccb *save_ccb) 1584 { 1585 const char *action_string; 1586 cam_status status; 1587 int frozen; 1588 int error, printed = 0; 1589 int openings; 1590 u_int32_t relsim_flags; 1591 u_int32_t timeout = 0; 1592 1593 action_string = NULL; 1594 status = ccb->ccb_h.status; 1595 frozen = (status & CAM_DEV_QFRZN) != 0; 1596 status &= CAM_STATUS_MASK; 1597 openings = relsim_flags = 0; 1598 1599 switch (status) { 1600 case CAM_REQ_CMP: 1601 error = 0; 1602 break; 1603 case CAM_SCSI_STATUS_ERROR: 1604 error = camperiphscsistatuserror(ccb, 1605 camflags, 1606 sense_flags, 1607 save_ccb, 1608 &openings, 1609 &relsim_flags, 1610 &timeout); 1611 break; 1612 case CAM_AUTOSENSE_FAIL: 1613 xpt_print(ccb->ccb_h.path, "AutoSense Failed\n"); 1614 error = EIO; /* we have to kill the command */ 1615 break; 1616 case CAM_REQ_CMP_ERR: 1617 if (bootverbose && printed == 0) { 1618 xpt_print(ccb->ccb_h.path, 1619 "Request completed with CAM_REQ_CMP_ERR\n"); 1620 printed++; 1621 } 1622 /* FALLTHROUGH */ 1623 case CAM_CMD_TIMEOUT: 1624 if (bootverbose && printed == 0) { 1625 xpt_print(ccb->ccb_h.path, "Command timed out\n"); 1626 printed++; 1627 } 1628 /* FALLTHROUGH */ 1629 case CAM_UNEXP_BUSFREE: 1630 if (bootverbose && printed == 0) { 1631 xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n"); 1632 printed++; 1633 } 1634 /* FALLTHROUGH */ 1635 case CAM_UNCOR_PARITY: 1636 if (bootverbose && printed == 0) { 1637 xpt_print(ccb->ccb_h.path, 1638 "Uncorrected Parity Error\n"); 1639 printed++; 1640 } 1641 /* FALLTHROUGH */ 1642 case CAM_DATA_RUN_ERR: 1643 if (bootverbose && printed == 0) { 1644 xpt_print(ccb->ccb_h.path, "Data Overrun\n"); 1645 printed++; 1646 } 1647 error = EIO; /* we have to kill the command */ 1648 /* decrement the number of retries */ 1649 if (ccb->ccb_h.retry_count > 0) { 1650 ccb->ccb_h.retry_count--; 1651 error = ERESTART; 1652 } else { 1653 action_string = "Retries 
Exausted"; 1654 error = EIO; 1655 } 1656 break; 1657 case CAM_UA_ABORT: 1658 case CAM_UA_TERMIO: 1659 case CAM_MSG_REJECT_REC: 1660 /* XXX Don't know that these are correct */ 1661 error = EIO; 1662 break; 1663 case CAM_SEL_TIMEOUT: 1664 { 1665 struct cam_path *newpath; 1666 1667 if ((camflags & CAM_RETRY_SELTO) != 0) { 1668 if (ccb->ccb_h.retry_count > 0) { 1669 1670 ccb->ccb_h.retry_count--; 1671 error = ERESTART; 1672 if (bootverbose && printed == 0) { 1673 xpt_print(ccb->ccb_h.path, 1674 "Selection Timeout\n"); 1675 printed++; 1676 } 1677 1678 /* 1679 * Wait a bit to give the device 1680 * time to recover before we try again. 1681 */ 1682 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1683 timeout = periph_selto_delay; 1684 break; 1685 } 1686 } 1687 error = ENXIO; 1688 /* Should we do more if we can't create the path?? */ 1689 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path), 1690 xpt_path_path_id(ccb->ccb_h.path), 1691 xpt_path_target_id(ccb->ccb_h.path), 1692 CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1693 break; 1694 1695 /* 1696 * Let peripheral drivers know that this device has gone 1697 * away. 1698 */ 1699 xpt_async(AC_LOST_DEVICE, newpath, NULL); 1700 xpt_free_path(newpath); 1701 break; 1702 } 1703 case CAM_REQ_INVALID: 1704 case CAM_PATH_INVALID: 1705 case CAM_DEV_NOT_THERE: 1706 case CAM_NO_HBA: 1707 case CAM_PROVIDE_FAIL: 1708 case CAM_REQ_TOO_BIG: 1709 case CAM_LUN_INVALID: 1710 case CAM_TID_INVALID: 1711 error = EINVAL; 1712 break; 1713 case CAM_SCSI_BUS_RESET: 1714 case CAM_BDR_SENT: 1715 /* 1716 * Commands that repeatedly timeout and cause these 1717 * kinds of error recovery actions, should return 1718 * CAM_CMD_TIMEOUT, which allows us to safely assume 1719 * that this command was an innocent bystander to 1720 * these events and should be unconditionally 1721 * retried. 
1722 */ 1723 if (bootverbose && printed == 0) { 1724 xpt_print_path(ccb->ccb_h.path); 1725 if (status == CAM_BDR_SENT) 1726 printf("Bus Device Reset sent\n"); 1727 else 1728 printf("Bus Reset issued\n"); 1729 printed++; 1730 } 1731 /* FALLTHROUGH */ 1732 case CAM_REQUEUE_REQ: 1733 /* Unconditional requeue */ 1734 error = ERESTART; 1735 if (bootverbose && printed == 0) { 1736 xpt_print(ccb->ccb_h.path, "Request Requeued\n"); 1737 printed++; 1738 } 1739 break; 1740 case CAM_RESRC_UNAVAIL: 1741 /* Wait a bit for the resource shortage to abate. */ 1742 timeout = periph_noresrc_delay; 1743 /* FALLTHROUGH */ 1744 case CAM_BUSY: 1745 if (timeout == 0) { 1746 /* Wait a bit for the busy condition to abate. */ 1747 timeout = periph_busy_delay; 1748 } 1749 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1750 /* FALLTHROUGH */ 1751 default: 1752 /* decrement the number of retries */ 1753 if (ccb->ccb_h.retry_count > 0) { 1754 ccb->ccb_h.retry_count--; 1755 error = ERESTART; 1756 if (bootverbose && printed == 0) { 1757 xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n", 1758 status); 1759 printed++; 1760 } 1761 } else { 1762 error = EIO; 1763 action_string = "Retries Exhausted"; 1764 } 1765 break; 1766 } 1767 1768 /* Attempt a retry */ 1769 if (error == ERESTART || error == 0) { 1770 if (frozen != 0) 1771 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1772 1773 if (error == ERESTART) { 1774 action_string = "Retrying Command"; 1775 xpt_action(ccb); 1776 } 1777 1778 if (frozen != 0) 1779 cam_release_devq(ccb->ccb_h.path, 1780 relsim_flags, 1781 openings, 1782 timeout, 1783 /*getcount_only*/0); 1784 } 1785 1786 /* 1787 * If we have and error and are booting verbosely, whine 1788 * *unless* this was a non-retryable selection timeout. 
1789 */ 1790 if (error != 0 && bootverbose && 1791 !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) { 1792 1793 1794 if (action_string == NULL) 1795 action_string = "Unretryable Error"; 1796 if (error != ERESTART) { 1797 xpt_print(ccb->ccb_h.path, "error %d\n", error); 1798 } 1799 xpt_print(ccb->ccb_h.path, "%s\n", action_string); 1800 } 1801 1802 return (error); 1803 } 1804