/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/linker_set.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static u_int	camperiphnextunit(struct periph_driver *p_drv,
		    u_int newunit, int wired, path_id_t pathid,
		    target_id_t target, lun_id_t lun);
static u_int	camperiphunit(struct periph_driver *p_drv,
		    path_id_t pathid, target_id_t target, lun_id_t lun);
static void	camperiphdone(struct cam_periph *periph,
		    union ccb *done_ccb);
static void	camperiphfree(struct cam_periph *periph);
static int	camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
		    u_int32_t sense_flags, union ccb *save_ccb,
		    int *openings, u_int32_t *relsim_flags,
		    u_int32_t *timeout);
static int	camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
		    u_int32_t sense_flags, union ccb *save_ccb,
		    int *openings, u_int32_t *relsim_flags,
		    u_int32_t *timeout);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);

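/*
 * Peripheral drivers do not usually call periphdriver_register() below
 * directly; they are normally registered at module load time via the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h.  A minimal sketch,
 * with hypothetical "xx" names:
 *
 *	static struct periph_driver xxdriver = {
 *		xxinit, "xx",
 *		TAILQ_HEAD_INITIALIZER(xxdriver.units), 0
 *	};
 *	PERIPHDRIVER_DECLARE(xx, xxdriver);
 */
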
void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
	    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		    sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
}

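/*
 * A driver typically calls cam_periph_alloc(), below, from its async
 * callback when the transport announces a new device (AC_FOUND_DEVICE).
 * A sketch of such a call, with hypothetical "xx" handlers and cgd
 * pointing at the announcing struct ccb_getdev:
 *
 *	status = cam_periph_alloc(xxregister, xxoninvalidate, xxcleanup,
 *	    xxstart, "xx", CAM_PERIPH_BIO, cgd->ccb_h.path, xxasync,
 *	    AC_FOUND_DEVICE, cgd);
 */
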
cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct periph_driver **p_drv;
	struct cam_sim *sim;
	struct cam_periph *periph;
	struct cam_periph *cur_periph;
	path_id_t path_id;
	target_id_t target_id;
	lun_id_t lun_id;
	cam_status status;
	u_int init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			    "valid device %s%d rejected\n",
			    periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
	    M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) type.  If the name is NULL, this function will return
 * the first peripheral driver that matches the specified path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	struct mtx *mtx;
	int error;

	mtx_assert(periph->sim->mtx, MA_OWNED);

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx = periph->sim->mtx;
	if (mtx == &Giant)
		mtx = NULL;

	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = msleep(periph, mtx, priority, "caplck", 0)) != 0) {
			cam_periph_release(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release(periph);
}

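/*
 * A peripheral open routine typically brackets its setup work with the
 * cam_periph_hold()/cam_periph_unhold() pair above, so that close and
 * invalidation cannot race with it.  A minimal sketch, assuming the
 * caller's "xx" driver conventions:
 *
 *	cam_periph_lock(periph);
 *	if ((error = cam_periph_hold(periph, PRIBIO | PCATCH)) != 0) {
 *		cam_periph_unlock(periph);
 *		return (error);
 *	}
 *	... issue any initial CCBs ...
 *	cam_periph_unhold(periph);
 *	cam_periph_unlock(periph);
 */
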
/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct cam_periph *periph;
	char *periph_name;
	int i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",
			     &strval) || strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int unit;
	int wired, i, val, dunit;
	const char *dname, *strval;
	char pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

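/*
 * The wiring entries consulted above come from the kernel environment
 * (device.hints).  For example, to reserve unit da4 for the disk at
 * scbus1 target 5 lun 0, one would set (example values only):
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 */
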
void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_periph_invalidate: refcount < 0!!\n");
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
		    periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

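/*
 * The map/unmap routines below are normally used in a bracketed pattern
 * around a synchronously executed CCB, as the pass(4) driver does.  A
 * sketch, assuming a driver-local error routine and devstat pointer:
 *
 *	error = cam_periph_mapmem(ccb, &mapinfo);
 *	if (error == 0) {
 *		error = cam_periph_runccb(ccb, xxerror, 0, 0,
 *		    softc->device_stats);
 *		cam_periph_unmapmem(ccb, &mapinfo);
 *	}
 */
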
/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory.  Since we use
 * buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			    "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			    "which is greater than DFLTPHYS(%d)\n",
			    (long)(lengths[i] +
			    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			    DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	struct mtx *mtx;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("entering cam_periph_getccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		if (periph->sim->mtx == &Giant)
			mtx = NULL;
		else
			mtx = periph->sim->mtx;
		msleep(&periph->ccb_list, mtx, PRIBIO, "cgticb", 0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct mtx *mtx;
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if (sim->mtx == &Giant)
		mtx = NULL;
	else
		mtx = sim->mtx;
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		msleep(&ccb->ccb_h.cbfcnp, mtx, PRIBIO, "cbwait", 0);
}

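/*
 * Peripheral drivers generally forward ioctls they do not handle
 * themselves to cam_periph_ioctl() below.  The one generic ioctl
 * implemented here, CAMGETPASSTHRU, reports the pass(4) instance that
 * shares this peripheral's path; this is how userland tools can find
 * the passthrough device corresponding to, e.g., a cd(4) or da(4) node.
 */
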
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

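/*
 * cam_periph_runccb() below executes a CCB synchronously: it dispatches
 * the request, sleeps until completion, and loops through the supplied
 * error routine for as long as that routine asks for a retry (ERESTART).
 * A sketch of a typical caller, with hypothetical "xx" names:
 *
 *	ccb = cam_periph_getccb(periph, 1);
 *	scsi_test_unit_ready(&ccb->csio, 4, xxdone, MSG_SIMPLE_Q_TAG,
 *	    SSD_FULL_SIZE, 5000);
 *	error = cam_periph_runccb(ccb, xxerror, 0, SF_RETRY_UA,
 *	    softc->device_stats);
 *	xpt_release_ccb(ccb);
 */
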
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

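/*
 * Each cam_freeze_devq() call bumps the device queue's freeze count and
 * must be balanced by a release; cam_release_devq() below performs that
 * release (unless getcount_only is set).  The settle-delay pattern used
 * by cam_periph_freeze_after_event() later in this file is:
 *
 *	cam_freeze_devq(path);
 *	cam_release_devq(path, RELSIM_RELEASE_AFTER_TIMEOUT,
 *	    0, timeout_ms, 0);
 */
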
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb *saved_ccb;
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;
	int xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * SCSI status is?  Or does it not matter,
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is?
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
				    RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

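/*
 * A driver's own async callback usually handles the events it cares
 * about itself and hands everything else to cam_periph_async() below.
 * A minimal sketch, with a hypothetical "xx" driver:
 *
 *	static void
 *	xxasync(void *callback_arg, u_int32_t code,
 *		struct cam_path *path, void *arg)
 *	{
 *		struct cam_periph *periph = callback_arg;
 *
 *		switch (code) {
 *		case AC_FOUND_DEVICE:
 *			... driver-specific attach work ...
 *			break;
 *		default:
 *			cam_periph_async(periph, code, path, arg);
 *			break;
 *		}
 *	}
 */
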
/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				 RELSIM_RELEASE_AFTER_TIMEOUT,
				 /*reduction*/0,
				 /*timeout*/duration_ms,
				 /*getcount_only*/0);
	}
}

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}

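/*
 * Drivers normally reach cam_periph_error() below through a thin,
 * driver-specific wrapper that supplies the softc's saved recovery CCB.
 * A sketch of such a wrapper, with hypothetical "xx" names:
 *
 *	static int
 *	xxerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
 *	{
 *		struct xx_softc *softc;
 *		struct cam_periph *periph;
 *
 *		periph = xpt_path_periph(ccb->ccb_h.path);
 *		softc = (struct xx_softc *)periph->softc;
 *
 *		return (cam_periph_error(ccb, cam_flags, sense_flags,
 *		    &softc->saved_ccb));
 *	}
 */
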
/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status status;
	int frozen;
	int error, printed = 0;
	int openings;
	u_int32_t relsim_flags;
	u_int32_t timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}