/*
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/linker_set.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
				  u_int newunit, int wired,
				  path_id_t pathid, target_id_t target,
				  lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
			      path_id_t pathid, target_id_t target,
			      lun_id_t lun);
static void	camperiphdone(struct cam_periph *periph,
			      union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb,
					 cam_flags camflags,
					 u_int32_t sense_flags,
					 union ccb *save_ccb,
					 int *openings,
					 u_int32_t *relsim_flags,
					 u_int32_t *timeout);
static int	camperiphscsisenseerror(union ccb *ccb,
					cam_flags camflags,
					u_int32_t sense_flags,
					union ccb *save_ccb,
					int *openings,
					u_int32_t *relsim_flags,
					u_int32_t *timeout);

static int nperiph_drivers;
struct periph_driver **periph_drivers;
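
/*
 * Peripheral ("type") drivers describe themselves to the CAM layer with a
 * struct periph_driver and register it by calling periphdriver_register(),
 * normally at boot or module load time (the linker-set machinery included
 * above is commonly used to arrange the call).  A minimal, illustrative
 * sketch -- the "foo" driver, its init routine, and the initializer layout
 * shown here are examples only:
 *
 *	static struct periph_driver foodriver = {
 *		fooinit, "foo",
 *		TAILQ_HEAD_INITIALIZER(foodriver.units), 0
 *	};
 *
 *	periphdriver_register(&foodriver);
 */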

void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_TEMP, M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_TEMP);
	nperiph_drivers++;
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;
	int s;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_DEVBUF,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}

	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	s = splsoftcam();
	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	splx(s);

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		s = splsoftcam();
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		splx(s);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_DEVBUF);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) name.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;
	int s;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		s = splsoftcam();
		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				splx(s);
				return(periph);
			}
		}
		splx(s);
		if (name != NULL)
			return(NULL);
	}
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{
	int s;

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	s = splsoftcam();
	periph->refcount++;
	splx(s);

	return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{
	int s;

	if (periph == NULL)
		return;

	s = splsoftcam();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	splx(s);

}
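
/*
 * Unit number selection.  The helpers below honor unit numbers "wired
 * down" via the kernel environment/hints mechanism, so that entries along
 * the lines of (illustrative syntax):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * reserve unit da4 for the device at that bus/target/lun, and wildcard
 * allocation skips over any unit number reserved this way.
 */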

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired" the
 * first time through the loop since after that we'll be looking at unit
 * numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int s;
	int i, val, dunit, r;
	const char *dname, *strval;

	s = splsoftcam();
	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print_path(periph->path);
				printf("Duplicate Wired Device entry!\n");
				xpt_print_path(periph->path);
				printf("Second device (%s device at scbus%d "
				       "target %d lun %d) will not be wired\n",
				       periph_name, pathid, target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	splx(s);
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

void
cam_periph_invalidate(struct cam_periph *periph)
{
	int s;

	s = splsoftcam();
	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.  The oninvalidate() routine is always called at
	 * splsoftcam().
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_periph_invalidate: refcount < 0!!\n");
	splx(s);
}

static void
camperiphfree(struct cam_periph *periph)
{
	int s;
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);

	s = splsoftcam();
	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	splx(s);

	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_DEVBUF);
}

/*
 * Wait interruptibly for an exclusive lock.
 */
int
cam_periph_lock(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return(ENXIO);

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = tsleep(periph, priority, "caplck", 0)) != 0) {
			cam_periph_release(periph);
			return error;
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return 0;
}

/*
 * Unlock and wake up any waiters.
 */
void
cam_periph_unlock(struct cam_periph *periph)
{
	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release(periph);
}
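
/*
 * A typical use of the advisory lock above, sketched from a hypothetical
 * peripheral open routine (the priority value is illustrative; PCATCH makes
 * the sleep interruptible as described above):
 *
 *	if ((error = cam_periph_lock(periph, PRIBIO | PCATCH)) != 0)
 *		return (error);
 *	... manipulate per-instance state exclusively ...
 *	cam_periph_unlock(periph);
 *
 * cam_periph_lock() holds a reference on the peripheral for as long as the
 * lock is held, so the peripheral cannot be freed underneath the caller.
 */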

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the flags */
		mapinfo->bp[i]->b_flags = B_PHYS;

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			printf("cam_periph_mapmem: error, "
			       "address %p, length %lu isn't "
			       "user accessible any more\n",
			       (void *)*data_ptrs[i],
			       (u_long)lengths[i]);
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				mapinfo->bp[j]->b_flags &= ~B_PHYS;
				relpbuf(mapinfo->bp[j], NULL);
			}
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* clear the flags we set above */
		mapinfo->bp[i]->b_flags &= ~B_PHYS;

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	int s;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	s = splsoftcam();

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	splx(s);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	int s;

	s = splsoftcam();
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);

	splx(s);
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}
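
/*
 * cam_periph_runccb() below is the usual way for a peripheral driver to
 * issue a CCB synchronously: allocate a CCB at some priority, fill it in,
 * run it to completion, and release it.  A rough sketch (the error routine,
 * flag values, and devstat pointer are placeholders the driver supplies):
 *
 *	union ccb *ccb;
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	... fill in ccb->csio, e.g. with a scsi_*() CDB builder ...
 *	error = cam_periph_runccb(ccb, error_routine, camflags, sense_flags,
 *				  devstats);
 *	xpt_release_ccb(ccb);
 */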

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	int error;

	error = 0;

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen..
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.flags |=
					    CAM_AUTOSNS_VALID;
					xpt_print_path(saved_ccb->ccb_h.path);
					printf("Recovered Sense\n");
#if 0
					scsi_sense_print(&saved_ccb->csio);
#endif
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if (done_ccb->ccb_h.retry_count > 1) {
				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}
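
/*
 * Freeze the device queue for whatever part of "duration_ms" has not yet
 * elapsed since "event_time".  For example, with a settle time of 1000ms
 * and a reset event that occurred 300ms ago, the queue is frozen and
 * scheduled for release after the remaining 700ms; if the full settle time
 * has already passed, nothing is frozen at all.
 */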
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;
	int s;

	s = splclock();
	microtime(&delta);
	splx(s);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}

}

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print_path(ccb->ccb_h.path);
		printf("Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("SCSI Status 0x%x\n", ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}
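
/*
 * A peripheral driver's error callback typically performs its own
 * driver-specific filtering and then falls through to cam_periph_error()
 * below.  A rough sketch (the "foo" driver, its softc, and the saved_ccb
 * field are illustrative only):
 *
 *	static int
 *	fooerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct foo_softc *softc;
 *		struct cam_periph *periph;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct foo_softc *)periph->softc;
 *
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *					 &softc->saved_ccb));
 *	}
 */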

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print_path(ccb->ccb_h.path);
		printf("AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print_path(ccb->ccb_h.path);
					printf("Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a second to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 1000;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
	case CAM_BUSY:
		/* timeout??? */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print_path(ccb->ccb_h.path);
				printf("CAM Status 0x%x\n", status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {


		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print_path(ccb->ccb_h.path);
			printf("error %d\n", error);
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("%s\n", action_string);
	}

	return (error);
}