/*-
 * Common functions for CAM "type" (peripheral) drivers.
 *
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * Copyright (c) 1997, 1998, 1999, 2000 Kenneth D. Merry.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/linker_set.h>
#include <sys/bio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/devicestat.h>
#include <sys/bus.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pass.h>

static	u_int		camperiphnextunit(struct periph_driver *p_drv,
					  u_int newunit, int wired,
					  path_id_t pathid, target_id_t target,
					  lun_id_t lun);
static	u_int		camperiphunit(struct periph_driver *p_drv,
				      path_id_t pathid, target_id_t target,
				      lun_id_t lun);
static	void		camperiphdone(struct cam_periph *periph,
				      union ccb *done_ccb);
static  void		camperiphfree(struct cam_periph *periph);
static	int		camperiphscsistatuserror(union ccb *ccb,
						 cam_flags camflags,
						 u_int32_t sense_flags,
						 union ccb *save_ccb,
						 int *openings,
						 u_int32_t *relsim_flags,
						 u_int32_t *timeout);
static	int		camperiphscsisenseerror(union ccb *ccb,
						cam_flags camflags,
						u_int32_t sense_flags,
						union ccb *save_ccb,
						int *openings,
						u_int32_t *relsim_flags,
						u_int32_t *timeout);

static int nperiph_drivers;
struct periph_driver **periph_drivers;

MALLOC_DEFINE(M_CAMPERIPH, "CAM periph", "CAM peripheral buffers");

static int periph_selto_delay = 1000;
TUNABLE_INT("kern.cam.periph_selto_delay", &periph_selto_delay);
static int periph_noresrc_delay = 500;
TUNABLE_INT("kern.cam.periph_noresrc_delay", &periph_noresrc_delay);
static int periph_busy_delay = 500;
TUNABLE_INT("kern.cam.periph_busy_delay", &periph_busy_delay);
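
/*
 * Usage note: the delays above (in milliseconds) can be overridden at
 * boot time via loader.conf, for example:
 *
 *	kern.cam.periph_selto_delay="2000"
 */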

void
periphdriver_register(void *data)
{
	struct periph_driver **newdrivers, **old;
	int ndrivers;

	ndrivers = nperiph_drivers + 2;
	newdrivers = malloc(sizeof(*newdrivers) * ndrivers, M_CAMPERIPH,
			    M_WAITOK);
	if (periph_drivers)
		bcopy(periph_drivers, newdrivers,
		      sizeof(*newdrivers) * nperiph_drivers);
	newdrivers[nperiph_drivers] = (struct periph_driver *)data;
	newdrivers[nperiph_drivers + 1] = NULL;
	old = periph_drivers;
	periph_drivers = newdrivers;
	if (old)
		free(old, M_CAMPERIPH);
	nperiph_drivers++;
}

cam_status
cam_periph_alloc(periph_ctor_t *periph_ctor,
		 periph_oninv_t *periph_oninvalidate,
		 periph_dtor_t *periph_dtor, periph_start_t *periph_start,
		 char *name, cam_periph_type type, struct cam_path *path,
		 ac_callback_t *ac_callback, ac_code code, void *arg)
{
	struct		periph_driver **p_drv;
	struct		cam_sim *sim;
	struct		cam_periph *periph;
	struct		cam_periph *cur_periph;
	path_id_t	path_id;
	target_id_t	target_id;
	lun_id_t	lun_id;
	cam_status	status;
	u_int		init_level;

	init_level = 0;
	/*
	 * Handle Hot-Plug scenarios.  If there is already a peripheral
	 * of our type assigned to this path, we are likely waiting for
	 * final close on an old, invalidated, peripheral.  If this is
	 * the case, queue up a deferred call to the peripheral's async
	 * handler.  If it looks like a mistaken re-allocation, complain.
	 */
	if ((periph = cam_periph_find(path, name)) != NULL) {

		if ((periph->flags & CAM_PERIPH_INVALID) != 0
		 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) {
			periph->flags |= CAM_PERIPH_NEW_DEV_FOUND;
			periph->deferred_callback = ac_callback;
			periph->deferred_ac = code;
			return (CAM_REQ_INPROG);
		} else {
			printf("cam_periph_alloc: attempt to re-allocate "
			       "valid device %s%d rejected\n",
			       periph->periph_name, periph->unit_number);
		}
		return (CAM_REQ_INVALID);
	}

	periph = (struct cam_periph *)malloc(sizeof(*periph), M_CAMPERIPH,
					     M_NOWAIT);

	if (periph == NULL)
		return (CAM_RESRC_UNAVAIL);

	init_level++;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, name) == 0)
			break;
	}
	xpt_unlock_buses();
	if (*p_drv == NULL) {
		printf("cam_periph_alloc: invalid periph name '%s'\n", name);
		free(periph, M_CAMPERIPH);
		return (CAM_REQ_INVALID);
	}

	sim = xpt_path_sim(path);
	path_id = xpt_path_path_id(path);
	target_id = xpt_path_target_id(path);
	lun_id = xpt_path_lun_id(path);
	bzero(periph, sizeof(*periph));
	cam_init_pinfo(&periph->pinfo);
	periph->periph_start = periph_start;
	periph->periph_dtor = periph_dtor;
	periph->periph_oninval = periph_oninvalidate;
	periph->type = type;
	periph->periph_name = name;
	periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id);
	periph->immediate_priority = CAM_PRIORITY_NONE;
	periph->refcount = 0;
	periph->sim = sim;
	SLIST_INIT(&periph->ccb_list);
	status = xpt_create_path(&path, periph, path_id, target_id, lun_id);
	if (status != CAM_REQ_CMP)
		goto failure;

	periph->path = path;
	init_level++;

	status = xpt_add_periph(periph);

	if (status != CAM_REQ_CMP)
		goto failure;

	cur_periph = TAILQ_FIRST(&(*p_drv)->units);
	while (cur_periph != NULL
	    && cur_periph->unit_number < periph->unit_number)
		cur_periph = TAILQ_NEXT(cur_periph, unit_links);

	if (cur_periph != NULL)
		TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links);
	else {
		TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links);
		(*p_drv)->generation++;
	}

	init_level++;

	status = periph_ctor(periph, arg);

	if (status == CAM_REQ_CMP)
		init_level++;

failure:
	switch (init_level) {
	case 4:
		/* Initialized successfully */
		break;
	case 3:
		TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
		xpt_remove_periph(periph);
		/* FALLTHROUGH */
	case 2:
		xpt_free_path(periph->path);
		/* FALLTHROUGH */
	case 1:
		free(periph, M_CAMPERIPH);
		/* FALLTHROUGH */
	case 0:
		/* No cleanup to perform. */
		break;
	default:
		panic("cam_periph_alloc: Unknown init level");
	}
	return(status);
}
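
#if 0
/*
 * Illustrative sketch (not compiled): how a peripheral driver would
 * typically plug into periphdriver_register() above, via the
 * PERIPHDRIVER_DECLARE() macro from cam_periph.h.  The "xx" driver
 * name and its init routine are hypothetical.
 */
static periph_init_t xxinit;

static struct periph_driver xxdriver =
{
	xxinit, "xx",
	TAILQ_HEAD_INITIALIZER(xxdriver.units), /* generation */ 0
};

/* Ends up calling periphdriver_register(&xxdriver) at load time. */
PERIPHDRIVER_DECLARE(xx, xxdriver);
#endif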

/*
 * Find a peripheral structure with the specified path, target, lun,
 * and (optionally) driver name.  If the name is NULL, this function
 * will return the first peripheral driver that matches the specified
 * path.
 */
struct cam_periph *
cam_periph_find(struct cam_path *path, char *name)
{
	struct periph_driver **p_drv;
	struct cam_periph *periph;

	xpt_lock_buses();
	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {

		if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0))
			continue;

		TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
			if (xpt_path_comp(periph->path, path) == 0) {
				xpt_unlock_buses();
				return(periph);
			}
		}
		if (name != NULL) {
			xpt_unlock_buses();
			return(NULL);
		}
	}
	xpt_unlock_buses();
	return(NULL);
}

cam_status
cam_periph_acquire(struct cam_periph *periph)
{

	if (periph == NULL)
		return(CAM_REQ_CMP_ERR);

	xpt_lock_buses();
	periph->refcount++;
	xpt_unlock_buses();

	return(CAM_REQ_CMP);
}

void
cam_periph_release_locked(struct cam_periph *periph)
{

	if (periph == NULL)
		return;

	xpt_lock_buses();
	if ((--periph->refcount == 0)
	 && (periph->flags & CAM_PERIPH_INVALID)) {
		camperiphfree(periph);
	}
	xpt_unlock_buses();
}

void
cam_periph_release(struct cam_periph *periph)
{
	struct cam_sim *sim;

	if (periph == NULL)
		return;

	sim = periph->sim;
	mtx_assert(sim->mtx, MA_NOTOWNED);
	mtx_lock(sim->mtx);
	cam_periph_release_locked(periph);
	mtx_unlock(sim->mtx);
}

int
cam_periph_hold(struct cam_periph *periph, int priority)
{
	int error;

	/*
	 * Increment the reference count on the peripheral
	 * while we wait for our lock attempt to succeed
	 * to ensure the peripheral doesn't disappear out
	 * from under us while we sleep.
	 */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);

	mtx_assert(periph->sim->mtx, MA_OWNED);
	while ((periph->flags & CAM_PERIPH_LOCKED) != 0) {
		periph->flags |= CAM_PERIPH_LOCK_WANTED;
		if ((error = mtx_sleep(periph, periph->sim->mtx, priority,
		    "caplck", 0)) != 0) {
			cam_periph_release_locked(periph);
			return (error);
		}
	}

	periph->flags |= CAM_PERIPH_LOCKED;
	return (0);
}

void
cam_periph_unhold(struct cam_periph *periph)
{

	mtx_assert(periph->sim->mtx, MA_OWNED);

	periph->flags &= ~CAM_PERIPH_LOCKED;
	if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) {
		periph->flags &= ~CAM_PERIPH_LOCK_WANTED;
		wakeup(periph);
	}

	cam_periph_release_locked(periph);
}

/*
 * Look for the next unit number that is not currently in use for this
 * peripheral type starting at "newunit".  Also exclude unit numbers that
 * are reserved for future "hardwiring" unless we already know that this
 * is a potential wired device.  Only assume that the device is "wired"
 * the first time through the loop since after that we'll be looking at
 * unit numbers that did not match a wiring entry.
 */
static u_int
camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired,
		  path_id_t pathid, target_id_t target, lun_id_t lun)
{
	struct	cam_periph *periph;
	char	*periph_name;
	int	i, val, dunit, r;
	const char *dname, *strval;

	periph_name = p_drv->driver_name;
	for (;;newunit++) {

		for (periph = TAILQ_FIRST(&p_drv->units);
		     periph != NULL && periph->unit_number != newunit;
		     periph = TAILQ_NEXT(periph, unit_links))
			;

		if (periph != NULL && periph->unit_number == newunit) {
			if (wired != 0) {
				xpt_print(periph->path, "Duplicate Wired "
				    "Device entry!\n");
				xpt_print(periph->path, "Second device (%s "
				    "device at scbus%d target %d lun %d) will "
				    "not be wired\n", periph_name, pathid,
				    target, lun);
				wired = 0;
			}
			continue;
		}
		if (wired)
			break;

		/*
		 * Don't match entries like "da 4" as a wired down
		 * device, but do match entries like "da 4 target 5"
		 * or even "da 4 scbus 1".
		 */
		i = 0;
		dname = periph_name;
		for (;;) {
			r = resource_find_dev(&i, dname, &dunit, NULL, NULL);
			if (r != 0)
				break;
			/* if no "target" and no specific scbus, skip */
			if (resource_int_value(dname, dunit, "target", &val) &&
			    (resource_string_value(dname, dunit, "at",&strval)||
			     strcmp(strval, "scbus") == 0))
				continue;
			if (newunit == dunit)
				break;
		}
		if (r != 0)
			break;
	}
	return (newunit);
}
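
/*
 * For illustration, the wiring entries matched above normally come
 * from /boot/device.hints; e.g., to wire unit da4 to bus scbus1,
 * target 5, lun 0:
 *
 *	hint.da.4.at="scbus1"
 *	hint.da.4.target="5"
 *	hint.da.4.lun="0"
 *
 * A bare entry such as hint.da.4.at="scbus" names no specific bus, so
 * it is deliberately not treated as wired.
 */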

static u_int
camperiphunit(struct periph_driver *p_drv, path_id_t pathid,
	      target_id_t target, lun_id_t lun)
{
	u_int	unit;
	int	wired, i, val, dunit;
	const char *dname, *strval;
	char	pathbuf[32], *periph_name;

	periph_name = p_drv->driver_name;
	snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid);
	unit = 0;
	i = 0;
	dname = periph_name;
	for (wired = 0; resource_find_dev(&i, dname, &dunit, NULL, NULL) == 0;
	     wired = 0) {
		if (resource_string_value(dname, dunit, "at", &strval) == 0) {
			if (strcmp(strval, pathbuf) != 0)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "target", &val) == 0) {
			if (val != target)
				continue;
			wired++;
		}
		if (resource_int_value(dname, dunit, "lun", &val) == 0) {
			if (val != lun)
				continue;
			wired++;
		}
		if (wired != 0) {
			unit = dunit;
			break;
		}
	}

	/*
	 * Either start from 0 looking for the next unit or from
	 * the unit number given in the resource config.  This way,
	 * if we have wildcard matches, we don't return the same
	 * unit number twice.
	 */
	unit = camperiphnextunit(p_drv, unit, wired, pathid, target, lun);

	return (unit);
}

void
cam_periph_invalidate(struct cam_periph *periph)
{

	/*
	 * We only call this routine the first time a peripheral is
	 * invalidated.
	 */
	if (((periph->flags & CAM_PERIPH_INVALID) == 0)
	 && (periph->periph_oninval != NULL))
		periph->periph_oninval(periph);

	periph->flags |= CAM_PERIPH_INVALID;
	periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND;

	xpt_lock_buses();
	if (periph->refcount == 0)
		camperiphfree(periph);
	else if (periph->refcount < 0)
		printf("cam_periph_invalidate: refcount < 0!!\n");
	xpt_unlock_buses();
}

static void
camperiphfree(struct cam_periph *periph)
{
	struct periph_driver **p_drv;

	for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
		if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
			break;
	}
	if (*p_drv == NULL) {
		printf("camperiphfree: attempt to free non-existent periph\n");
		return;
	}

	TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links);
	(*p_drv)->generation++;
	xpt_unlock_buses();

	if (periph->periph_dtor != NULL)
		periph->periph_dtor(periph);
	xpt_remove_periph(periph);

	if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) {
		union ccb ccb;
		void *arg;

		switch (periph->deferred_ac) {
		case AC_FOUND_DEVICE:
			ccb.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		case AC_PATH_REGISTERED:
			ccb.ccb_h.func_code = XPT_PATH_INQ;
			xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1);
			xpt_action(&ccb);
			arg = &ccb;
			break;
		default:
			arg = NULL;
			break;
		}
		periph->deferred_callback(NULL, periph->deferred_ac,
					  periph->path, arg);
	}
	xpt_free_path(periph->path);
	free(periph, M_CAMPERIPH);
	xpt_lock_buses();
}

/*
 * Map user virtual pointers into kernel virtual address space, so we can
 * access the memory.  This won't work on physical pointers, for now it's
 * up to the caller to check for that.  (XXX KDM -- should we do that here
 * instead?)  This also only works for up to DFLTPHYS memory.  Since we
 * use buffers to map stuff in and out, we're limited to the buffer size.
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i, j;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/*
		 * Map the buffer into kernel memory.
		 *
		 * Note that useracc() alone is not a sufficient test.
		 * vmapbuf() can still fail due to a smaller file mapped
		 * into a larger area of VM, or if userland races against
		 * vmapbuf() after the useracc() check.
		 */
		if (vmapbuf(mapinfo->bp[i]) < 0) {
			for (j = 0; j < i; ++j) {
				*data_ptrs[j] = mapinfo->bp[j]->b_saveaddr;
				vunmapbuf(mapinfo->bp[j]);
				relpbuf(mapinfo->bp[j], NULL);
			}
			relpbuf(mapinfo->bp[i], NULL);
			PRELE(curproc);
			return(EACCES);
		}

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	/*
	 * Now that we've gotten this far, change ownership to the kernel
	 * of the buffers so that we don't run afoul of returning to user
	 * space with locks (on the buffer) held.
	 */
	for (i = 0; i < numbufs; i++) {
		BUF_KERNPROC(mapinfo->bp[i]);
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}
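
#if 0
/*
 * Illustrative sketch (not compiled): the usual pairing of
 * cam_periph_mapmem() and cam_periph_unmapmem() around I/O on a
 * user-supplied buffer, as a passthrough-style driver would do it.
 * Error handling is abbreviated and the "xx" names are hypothetical.
 */
static int
xxuserio(struct cam_periph *periph, union ccb *ccb)
{
	struct cam_periph_map_info mapinfo;
	int error;

	bzero(&mapinfo, sizeof(mapinfo));

	/* Wire down and map the user buffers into the kernel. */
	error = cam_periph_mapmem(ccb, &mapinfo);
	if (error != 0)
		return (error);

	/* ... issue the CCB here, e.g. via cam_periph_runccb() ... */

	/* Restore the user pointers and release the mapping buffers. */
	cam_periph_unmapmem(ccb, &mapinfo);
	return (0);
}
#endif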

union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;

	mtx_assert(periph->sim->mtx, MA_OWNED);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("entering cam_periph_getccb\n"));

	while (SLIST_FIRST(&periph->ccb_list) == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		if ((SLIST_FIRST(&periph->ccb_list) != NULL)
		 && (SLIST_FIRST(&periph->ccb_list)->pinfo.priority == priority))
			break;
		mtx_assert(periph->sim->mtx, MA_OWNED);
		mtx_sleep(&periph->ccb_list, periph->sim->mtx, PRIBIO, "cgticb",
		    0);
	}

	ccb_h = SLIST_FIRST(&periph->ccb_list);
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	return ((union ccb *)ccb_h);
}

void
cam_periph_ccbwait(union ccb *ccb)
{
	struct cam_sim *sim;

	sim = xpt_path_sim(ccb->ccb_h.path);
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		mtx_sleep(&ccb->ccb_h.cbfcnp, sim->mtx, PRIBIO, "cbwait", 0);
}

int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name,
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	struct cam_sim *sim;
	int error;

	error = 0;
	sim = xpt_path_sim(ccb->ccb_h.path);
	mtx_assert(sim->mtx, MA_OWNED);

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds, NULL);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ? DEVSTAT_NO_DATA :
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE :
					DEVSTAT_READ, NULL, NULL);

	return(error);
}

void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}
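
#if 0
/*
 * Illustrative sketch (not compiled): a synchronous TEST UNIT READY
 * issued through cam_periph_getccb()/cam_periph_runccb() with the SIM
 * lock held.  The "xx" names are hypothetical.  Because
 * cam_periph_ccbwait() sleeps on &ccb->ccb_h.cbfcnp, the completion
 * callback must issue the matching wakeup().
 */
static void
xxdone(struct cam_periph *periph, union ccb *done_ccb)
{
	/* The cam_periph_runccb() caller will release the CCB. */
	wakeup(&done_ccb->ccb_h.cbfcnp);
}

static int
xxtur(struct cam_periph *periph)
{
	union ccb *ccb;
	int error;

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_test_unit_ready(&ccb->csio,
			     /*retries*/1,
			     xxdone,
			     MSG_SIMPLE_Q_TAG,
			     SSD_FULL_SIZE,
			     /*timeout*/5000);

	/*
	 * NULL error routine for brevity; drivers normally pass a
	 * wrapper around cam_periph_error().
	 */
	error = cam_periph_runccb(ccb, NULL, CAM_FLAG_NONE,
				  /*sense_flags*/SF_RETRY_UA,
				  /*devstat*/NULL);
	xpt_release_ccb(ccb);
	return (error);
}
#endif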

#define saved_ccb_ptr ppriv_ptr0
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	union ccb      *saved_ccb;
	cam_status	status;
	int		frozen;
	int		sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t	relsim_flags, timeout;
	u_int32_t	qfrozen_cnt;
	int		xpt_done_ccb;

	xpt_done_ccb = FALSE;
	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;
	saved_ccb = (union ccb *)done_ccb->ccb_h.saved_ccb_ptr;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {
	case CAM_REQ_CMP:
	{
		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get
		 * the inquiry information.  Many devices (mostly disks)
		 * don't properly report their inquiry information unless
		 * they are spun up.
		 *
		 * If we manually retrieved sense into a CCB and got
		 * something other than "NO SENSE" send the updated CCB
		 * back to the client via xpt_done() to be processed via
		 * the error recovery code again.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
			if (scsi_cmd->opcode == REQUEST_SENSE) {
				u_int sense_key;

				sense_key = saved_ccb->csio.sense_data.flags;
				sense_key &= SSD_KEY;
				if (sense_key != SSD_KEY_NO_SENSE) {
					saved_ccb->ccb_h.status |=
					    CAM_AUTOSNS_VALID;
#if 0
					xpt_print(saved_ccb->ccb_h.path,
					    "Recovered Sense\n");
					scsi_sense_print(&saved_ccb->csio);
					cam_error_print(saved_ccb, CAM_ESF_ALL,
							CAM_EPF_ALL);
#endif
					xpt_done_ccb = TRUE;
				}
			}
		}
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		if (xpt_done_ccb == FALSE)
			xpt_action(done_ccb);

		break;
	}
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct ccb_getdev cgd;
			struct scsi_sense_data *sense;
			int    error_code, sense_key, asc, ascq;
			scsi_sense_action err_action;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code,
					   &sense_key, &asc, &ascq);

			/*
			 * Grab the inquiry data for this device.
			 */
			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
				      /*priority*/ 1);
			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
			xpt_action((union ccb *)&cgd);
			err_action = scsi_error_action(&done_ccb->csio,
						       &cgd.inq_data, 0);

			/*
			 * If the error is "invalid field in CDB",
			 * and the load/eject flag is set, turn the
			 * flag off and try again.  This is just in
			 * case the drive in question barfs on the
			 * load eject flag.  The CAM code should set
			 * the load/eject flag by default for
			 * removable media.
			 */

			/* XXX KDM
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			    (asc == 0x24) && (ascq == 0x00) &&
			    (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if ((done_ccb->ccb_h.retry_count > 1)
				&& ((err_action & SS_MASK) != SS_FAIL)) {

				/*
				 * In this case, the error recovery
				 * command failed, but we've got
				 * some retries left on it.  Give
				 * it another try unless this is an
				 * unretryable error.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/*
				 * Perform the final retry with the original
				 * CCB so that final error processing is
				 * performed by the owner of the CCB.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/*
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	/*
	 * XXX This isn't appropriate in all cases.  Restructure,
	 *     so that the retry count is only decremented on an
	 *     actual retry.  Remember that the original ccb had its
	 *     retry count dropped before entering recovery, so
	 *     doing it again is a bug.
	 */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
	if (xpt_done_ccb == TRUE)
		(*done_ccb->ccb_h.cbfcnp)(periph, done_ccb);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		cam_periph_bus_settle(periph, scsi_delay);
		break;
	}
	default:
		break;
	}
}
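
#if 0
/*
 * Illustrative sketch (not compiled): a driver async callback that
 * handles the events it cares about itself and defers the rest to
 * cam_periph_async() above.  The "xx" names are hypothetical.
 */
static void
xxasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
		/* Driver-specific attach logic would go here. */
		break;
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}
#endif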

void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;

	microtime(&delta);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path);
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}
}

static int
camperiphscsistatuserror(union ccb *ccb, cam_flags camflags,
			 u_int32_t sense_flags, union ccb *save_ccb,
			 int *openings, u_int32_t *relsim_flags,
			 u_int32_t *timeout)
{
	int error;

	switch (ccb->csio.scsi_status) {
	case SCSI_STATUS_OK:
	case SCSI_STATUS_COND_MET:
	case SCSI_STATUS_INTERMED:
	case SCSI_STATUS_INTERMED_COND_MET:
		error = 0;
		break;
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
		error = camperiphscsisenseerror(ccb,
						camflags,
						sense_flags,
						save_ccb,
						openings,
						relsim_flags,
						timeout);
		break;
	case SCSI_STATUS_QUEUE_FULL:
	{
		/* no decrement */
		struct ccb_getdevstats cgds;

		/*
		 * First off, find out what the current
		 * transaction counts are.
		 */
		xpt_setup_ccb(&cgds.ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		cgds.ccb_h.func_code = XPT_GDEV_STATS;
		xpt_action((union ccb *)&cgds);

		/*
		 * If we were the only transaction active, treat
		 * the QUEUE FULL as if it were a BUSY condition.
		 */
		if (cgds.dev_active != 0) {
			int total_openings;

			/*
			 * Reduce the number of openings to
			 * be 1 less than the amount it took
			 * to get a queue full bounded by the
			 * minimum allowed tag count for this
			 * device.
			 */
			total_openings = cgds.dev_active + cgds.dev_openings;
			*openings = cgds.dev_active;
			if (*openings < cgds.mintags)
				*openings = cgds.mintags;
			if (*openings < total_openings)
				*relsim_flags = RELSIM_ADJUST_OPENINGS;
			else {
				/*
				 * Some devices report queue full for
				 * temporary resource shortages.  For
				 * this reason, we allow a minimum
				 * tag count to be entered via a
				 * quirk entry to prevent the queue
				 * count on these devices from falling
				 * to a pessimistically low value.  We
				 * still wait for the next successful
				 * completion, however, before queueing
				 * more transactions to the device.
				 */
				*relsim_flags = RELSIM_RELEASE_AFTER_CMDCMPLT;
			}
			*timeout = 0;
			error = ERESTART;
			if (bootverbose) {
				xpt_print(ccb->ccb_h.path, "Queue Full\n");
			}
			break;
		}
		/* FALLTHROUGH */
	}
	case SCSI_STATUS_BUSY:
		/*
		 * Restart the queue after either another
		 * command completes or a 1 second timeout.
		 */
		if (bootverbose) {
			xpt_print(ccb->ccb_h.path, "Device Busy\n");
		}
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT
				      | RELSIM_RELEASE_AFTER_CMDCMPLT;
			*timeout = 1000;
		} else {
			error = EIO;
		}
		break;
	case SCSI_STATUS_RESERV_CONFLICT:
		xpt_print(ccb->ccb_h.path, "Reservation Conflict\n");
		error = EIO;
		break;
	default:
		xpt_print(ccb->ccb_h.path, "SCSI Status 0x%x\n",
		    ccb->csio.scsi_status);
		error = EIO;
		break;
	}
	return (error);
}

static int
camperiphscsisenseerror(union ccb *ccb, cam_flags camflags,
			u_int32_t sense_flags, union ccb *save_ccb,
			int *openings, u_int32_t *relsim_flags,
			u_int32_t *timeout)
{
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	if (periph->flags & CAM_PERIPH_RECOVERY_INPROG) {

		/*
		 * If error recovery is already in progress, don't attempt
		 * to process this error, but requeue it unconditionally
		 * and attempt to process it once error recovery has
		 * completed.  This failed command is probably related to
		 * the error that caused the currently active error recovery
		 * action so our current recovery efforts should also
		 * address this command.  Be aware that the error recovery
		 * code assumes that only one recovery action is in progress
		 * on a particular peripheral instance at any given time
		 * (e.g. only one saved CCB for error recovery) so it is
		 * imperative that we don't violate this assumption.
		 */
		error = ERESTART;
	} else {
		scsi_sense_action err_action;
		struct ccb_getdev cgd;
		const char *action_string;
		union ccb* print_ccb;

		/* A description of the error recovery action performed */
		action_string = NULL;

		/*
		 * The location of the original ccb
		 * for sense printing purposes.
		 */
		print_ccb = ccb;

		/*
		 * Grab the inquiry data for this device.
		 */
		xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
			err_action = scsi_error_action(&ccb->csio,
						       &cgd.inq_data,
						       sense_flags);
		else if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
			err_action = SS_REQSENSE;
		else
			err_action = SS_RETRY|SSQ_DECREMENT_COUNT|EIO;

		error = err_action & SS_ERRMASK;

		/*
		 * If the recovery action will consume a retry,
		 * make sure we actually have retries available.
		 */
		if ((err_action & SSQ_DECREMENT_COUNT) != 0) {
			if (ccb->ccb_h.retry_count > 0)
				ccb->ccb_h.retry_count--;
			else {
				action_string = "Retries Exhausted";
				goto sense_error_done;
			}
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Do common portions of commands that
			 * use recovery CCBs.
			 */
			if (save_ccb == NULL) {
				action_string = "No recovery CCB supplied";
				goto sense_error_done;
			}
			bcopy(ccb, save_ccb, sizeof(*save_ccb));
			print_ccb = save_ccb;
			periph->flags |= CAM_PERIPH_RECOVERY_INPROG;
		}

		switch (err_action & SS_MASK) {
		case SS_NOP:
			action_string = "No Recovery Action Needed";
			error = 0;
			break;
		case SS_RETRY:
			action_string = "Retrying Command (per Sense Data)";
			error = ERESTART;
			break;
		case SS_FAIL:
			action_string = "Unretryable error";
			break;
		case SS_START:
		{
			int le;

			/*
			 * Send a start unit command to the device, and
			 * then retry the command.
			 */
			action_string = "Attempting to Start Unit";

			/*
			 * Check for removable media and set
			 * load/eject flag appropriately.
			 */
			if (SID_IS_REMOVABLE(&cgd.inq_data))
				le = TRUE;
			else
				le = FALSE;

			scsi_start_stop(&ccb->csio,
					/*retries*/1,
					camperiphdone,
					MSG_SIMPLE_Q_TAG,
					/*start*/TRUE,
					/*load/eject*/le,
					/*immediate*/FALSE,
					SSD_FULL_SIZE,
					/*timeout*/50000);
			break;
		}
		case SS_TUR:
		{
			/*
			 * Send a Test Unit Ready to the device.
			 * If the 'many' flag is set, we send 120
			 * test unit ready commands, one every half
			 * second.  Otherwise, we just send one TUR.
			 * We only want to do this if the retry
			 * count has not been exhausted.
			 */
			int retries;

			if ((err_action & SSQ_MANY) != 0) {
				action_string = "Polling device for readiness";
				retries = 120;
			} else {
				action_string = "Testing device for readiness";
				retries = 1;
			}
			scsi_test_unit_ready(&ccb->csio,
					     retries,
					     camperiphdone,
					     MSG_SIMPLE_Q_TAG,
					     SSD_FULL_SIZE,
					     /*timeout*/5000);

			/*
			 * Accomplish our 500ms delay by deferring
			 * the release of our device queue appropriately.
			 */
			*relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
			*timeout = 500;
			break;
		}
		case SS_REQSENSE:
		{
			/*
			 * Send a Request Sense to the device.  We
			 * assume that we are in a contingent allegiance
			 * condition so we do not tag this request.
			 */
			scsi_request_sense(&ccb->csio, /*retries*/1,
					   camperiphdone,
					   &save_ccb->csio.sense_data,
					   sizeof(save_ccb->csio.sense_data),
					   CAM_TAG_ACTION_NONE,
					   /*sense_len*/SSD_FULL_SIZE,
					   /*timeout*/5000);
			break;
		}
		default:
			panic("Unhandled error action %x", err_action);
		}

		if ((err_action & SS_MASK) >= SS_START) {
			/*
			 * Drop the priority to 0 so that the recovery
			 * CCB is the first to execute.  Freeze the queue
			 * after this command is sent so that we can
			 * restore the old csio and have it queued in
			 * the proper order before we release normal
			 * transactions to the device.
			 */
			ccb->ccb_h.pinfo.priority = 0;
			ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
			ccb->ccb_h.saved_ccb_ptr = save_ccb;
			error = ERESTART;
		}

sense_error_done:
		if ((err_action & SSQ_PRINT_SENSE) != 0
		 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) {
			cam_error_print(print_ccb, CAM_ESF_ALL, CAM_EPF_ALL);
			xpt_print_path(ccb->ccb_h.path);
			if (bootverbose)
				scsi_sense_print(&print_ccb->csio);
			printf("%s\n", action_string);
		}
	}
	return (error);
}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
 */
int
cam_periph_error(union ccb *ccb, cam_flags camflags,
		 u_int32_t sense_flags, union ccb *save_ccb)
{
	const char *action_string;
	cam_status  status;
	int	    frozen;
	int	    error, printed = 0;
	int	    openings;
	u_int32_t   relsim_flags;
	u_int32_t   timeout = 0;

	action_string = NULL;
	status = ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	status &= CAM_STATUS_MASK;
	openings = relsim_flags = 0;

	switch (status) {
	case CAM_REQ_CMP:
		error = 0;
		break;
	case CAM_SCSI_STATUS_ERROR:
		error = camperiphscsistatuserror(ccb,
						 camflags,
						 sense_flags,
						 save_ccb,
						 &openings,
						 &relsim_flags,
						 &timeout);
		break;
	case CAM_AUTOSENSE_FAIL:
		xpt_print(ccb->ccb_h.path, "AutoSense Failed\n");
		error = EIO;	/* we have to kill the command */
		break;
	case CAM_REQ_CMP_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Request completed with CAM_REQ_CMP_ERR\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_CMD_TIMEOUT:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Command timed out\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNEXP_BUSFREE:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Unexpected Bus Free\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_UNCOR_PARITY:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path,
			    "Uncorrected Parity Error\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_DATA_RUN_ERR:
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Data Overrun\n");
			printed++;
		}
		error = EIO;	/* we have to kill the command */
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
		} else {
			action_string = "Retries Exhausted";
			error = EIO;
		}
		break;
	case CAM_UA_ABORT:
	case CAM_UA_TERMIO:
	case CAM_MSG_REJECT_REC:
		/* XXX Don't know that these are correct */
		error = EIO;
		break;
	case CAM_SEL_TIMEOUT:
	{
		struct cam_path *newpath;

		if ((camflags & CAM_RETRY_SELTO) != 0) {
			if (ccb->ccb_h.retry_count > 0) {

				ccb->ccb_h.retry_count--;
				error = ERESTART;
				if (bootverbose && printed == 0) {
					xpt_print(ccb->ccb_h.path,
					    "Selection Timeout\n");
					printed++;
				}

				/*
				 * Wait a bit to give the device
				 * time to recover before we try again.
				 */
				relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = periph_selto_delay;
				break;
			}
		}
		error = ENXIO;
		/* Should we do more if we can't create the path?? */
		if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path),
				    xpt_path_path_id(ccb->ccb_h.path),
				    xpt_path_target_id(ccb->ccb_h.path),
				    CAM_LUN_WILDCARD) != CAM_REQ_CMP)
			break;

		/*
		 * Let peripheral drivers know that this device has gone
		 * away.
		 */
		xpt_async(AC_LOST_DEVICE, newpath, NULL);
		xpt_free_path(newpath);
		break;
	}
	case CAM_REQ_INVALID:
	case CAM_PATH_INVALID:
	case CAM_DEV_NOT_THERE:
	case CAM_NO_HBA:
	case CAM_PROVIDE_FAIL:
	case CAM_REQ_TOO_BIG:
	case CAM_LUN_INVALID:
	case CAM_TID_INVALID:
		error = EINVAL;
		break;
	case CAM_SCSI_BUS_RESET:
	case CAM_BDR_SENT:
		/*
		 * Commands that repeatedly timeout and cause these
		 * kinds of error recovery actions, should return
		 * CAM_CMD_TIMEOUT, which allows us to safely assume
		 * that this command was an innocent bystander to
		 * these events and should be unconditionally
		 * retried.
		 */
		if (bootverbose && printed == 0) {
			xpt_print_path(ccb->ccb_h.path);
			if (status == CAM_BDR_SENT)
				printf("Bus Device Reset sent\n");
			else
				printf("Bus Reset issued\n");
			printed++;
		}
		/* FALLTHROUGH */
	case CAM_REQUEUE_REQ:
		/* Unconditional requeue */
		error = ERESTART;
		if (bootverbose && printed == 0) {
			xpt_print(ccb->ccb_h.path, "Request Requeued\n");
			printed++;
		}
		break;
	case CAM_RESRC_UNAVAIL:
		/* Wait a bit for the resource shortage to abate. */
		timeout = periph_noresrc_delay;
		/* FALLTHROUGH */
	case CAM_BUSY:
		if (timeout == 0) {
			/* Wait a bit for the busy condition to abate. */
			timeout = periph_busy_delay;
		}
		relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT;
		/* FALLTHROUGH */
	default:
		/* decrement the number of retries */
		if (ccb->ccb_h.retry_count > 0) {
			ccb->ccb_h.retry_count--;
			error = ERESTART;
			if (bootverbose && printed == 0) {
				xpt_print(ccb->ccb_h.path, "CAM Status 0x%x\n",
				    status);
				printed++;
			}
		} else {
			error = EIO;
			action_string = "Retries Exhausted";
		}
		break;
	}

	/* Attempt a retry */
	if (error == ERESTART || error == 0) {
		if (frozen != 0)
			ccb->ccb_h.status &= ~CAM_DEV_QFRZN;

		if (error == ERESTART) {
			action_string = "Retrying Command";
			xpt_action(ccb);
		}

		if (frozen != 0)
			cam_release_devq(ccb->ccb_h.path,
					 relsim_flags,
					 openings,
					 timeout,
					 /*getcount_only*/0);
	}

	/*
	 * If we have an error and are booting verbosely, whine
	 * *unless* this was a non-retryable selection timeout.
	 */
	if (error != 0 && bootverbose &&
	    !(status == CAM_SEL_TIMEOUT && (camflags & CAM_RETRY_SELTO) == 0)) {

		if (action_string == NULL)
			action_string = "Unretryable Error";
		if (error != ERESTART) {
			xpt_print(ccb->ccb_h.path, "error %d\n", error);
		}
		xpt_print(ccb->ccb_h.path, "%s\n", action_string);
	}

	return (error);
}
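
#if 0
/*
 * Illustrative sketch (not compiled): the thin error_routine wrapper
 * that peripheral drivers typically pass to cam_periph_runccb(),
 * filtering any cases they handle specially before falling back to
 * cam_periph_error() above.  "xxerror" is hypothetical; a real driver
 * would usually supply a per-device recovery CCB instead of NULL so
 * that the SS_START/SS_TUR/SS_REQSENSE recovery actions can run.
 */
static int
xxerror(union ccb *ccb, cam_flags camflags, u_int32_t sense_flags)
{

	return (cam_periph_error(ccb, camflags, sense_flags,
				 /*save_ccb*/NULL));
}
#endif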