1 /* 2 * Common functions for CAM "type" (peripheral) drivers. 3 * 4 * Copyright (c) 1997, 1998 Justin T. Gibbs. 5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions, and the following disclaimer, 13 * without modification, immediately at the beginning of the file. 14 * 2. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * $FreeBSD$ 30 */ 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/types.h> 35 #include <sys/malloc.h> 36 #include <sys/linker_set.h> 37 #include <sys/bio.h> 38 #include <sys/buf.h> 39 #include <sys/proc.h> 40 #include <sys/devicestat.h> 41 #include <sys/bus.h> 42 #include <vm/vm.h> 43 #include <vm/vm_extern.h> 44 45 #include <cam/cam.h> 46 #include <cam/cam_ccb.h> 47 #include <cam/cam_xpt_periph.h> 48 #include <cam/cam_periph.h> 49 #include <cam/cam_debug.h> 50 51 #include <cam/scsi/scsi_all.h> 52 #include <cam/scsi/scsi_message.h> 53 #include <cam/scsi/scsi_da.h> 54 #include <cam/scsi/scsi_pass.h> 55 56 static u_int camperiphnextunit(struct periph_driver *p_drv, 57 u_int newunit, int wired); 58 static u_int camperiphunit(struct periph_driver *p_drv, 59 path_id_t path_id_t, 60 target_id_t target, lun_id_t lun); 61 static void camperiphdone(struct cam_periph *periph, 62 union ccb *done_ccb); 63 static void camperiphfree(struct cam_periph *periph); 64 65 cam_status 66 cam_periph_alloc(periph_ctor_t *periph_ctor, 67 periph_oninv_t *periph_oninvalidate, 68 periph_dtor_t *periph_dtor, periph_start_t *periph_start, 69 char *name, cam_periph_type type, struct cam_path *path, 70 ac_callback_t *ac_callback, ac_code code, void *arg) 71 { 72 struct periph_driver **p_drv; 73 struct cam_periph *periph; 74 struct cam_periph *cur_periph; 75 path_id_t path_id; 76 target_id_t target_id; 77 lun_id_t lun_id; 78 cam_status status; 79 u_int init_level; 80 int s; 81 82 init_level = 0; 83 /* 84 * Handle Hot-Plug scenarios. If there is already a peripheral 85 * of our type assigned to this path, we are likely waiting for 86 * final close on an old, invalidated, peripheral. If this is 87 * the case, queue up a deferred call to the peripheral's async 88 * handler. If it looks like a mistaken re-alloation, complain. 
89 */ 90 if ((periph = cam_periph_find(path, name)) != NULL) { 91 92 if ((periph->flags & CAM_PERIPH_INVALID) != 0 93 && (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) == 0) { 94 periph->flags |= CAM_PERIPH_NEW_DEV_FOUND; 95 periph->deferred_callback = ac_callback; 96 periph->deferred_ac = code; 97 return (CAM_REQ_INPROG); 98 } else { 99 printf("cam_periph_alloc: attempt to re-allocate " 100 "valid device %s%d rejected\n", 101 periph->periph_name, periph->unit_number); 102 } 103 return (CAM_REQ_INVALID); 104 } 105 106 periph = (struct cam_periph *)malloc(sizeof(*periph), M_DEVBUF, 107 M_NOWAIT); 108 109 if (periph == NULL) 110 return (CAM_RESRC_UNAVAIL); 111 112 init_level++; 113 114 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items; 115 *p_drv != NULL; p_drv++) { 116 if (strcmp((*p_drv)->driver_name, name) == 0) 117 break; 118 } 119 120 path_id = xpt_path_path_id(path); 121 target_id = xpt_path_target_id(path); 122 lun_id = xpt_path_lun_id(path); 123 bzero(periph, sizeof(*periph)); 124 cam_init_pinfo(&periph->pinfo); 125 periph->periph_start = periph_start; 126 periph->periph_dtor = periph_dtor; 127 periph->periph_oninval = periph_oninvalidate; 128 periph->type = type; 129 periph->periph_name = name; 130 periph->unit_number = camperiphunit(*p_drv, path_id, target_id, lun_id); 131 periph->immediate_priority = CAM_PRIORITY_NONE; 132 periph->refcount = 0; 133 SLIST_INIT(&periph->ccb_list); 134 status = xpt_create_path(&path, periph, path_id, target_id, lun_id); 135 if (status != CAM_REQ_CMP) 136 goto failure; 137 138 periph->path = path; 139 init_level++; 140 141 status = xpt_add_periph(periph); 142 143 if (status != CAM_REQ_CMP) 144 goto failure; 145 146 s = splsoftcam(); 147 cur_periph = TAILQ_FIRST(&(*p_drv)->units); 148 while (cur_periph != NULL 149 && cur_periph->unit_number < periph->unit_number) 150 cur_periph = TAILQ_NEXT(cur_periph, unit_links); 151 152 if (cur_periph != NULL) 153 TAILQ_INSERT_BEFORE(cur_periph, periph, unit_links); 154 else { 155 
TAILQ_INSERT_TAIL(&(*p_drv)->units, periph, unit_links); 156 (*p_drv)->generation++; 157 } 158 159 splx(s); 160 161 init_level++; 162 163 status = periph_ctor(periph, arg); 164 165 if (status == CAM_REQ_CMP) 166 init_level++; 167 168 failure: 169 switch (init_level) { 170 case 4: 171 /* Initialized successfully */ 172 break; 173 case 3: 174 s = splsoftcam(); 175 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 176 splx(s); 177 xpt_remove_periph(periph); 178 case 2: 179 xpt_free_path(periph->path); 180 case 1: 181 free(periph, M_DEVBUF); 182 case 0: 183 /* No cleanup to perform. */ 184 break; 185 default: 186 panic("cam_periph_alloc: Unkown init level"); 187 } 188 return(status); 189 } 190 191 /* 192 * Find a peripheral structure with the specified path, target, lun, 193 * and (optionally) type. If the name is NULL, this function will return 194 * the first peripheral driver that matches the specified path. 195 */ 196 struct cam_periph * 197 cam_periph_find(struct cam_path *path, char *name) 198 { 199 struct periph_driver **p_drv; 200 struct cam_periph *periph; 201 int s; 202 203 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items; 204 *p_drv != NULL; p_drv++) { 205 206 if (name != NULL && (strcmp((*p_drv)->driver_name, name) != 0)) 207 continue; 208 209 s = splsoftcam(); 210 for (periph = TAILQ_FIRST(&(*p_drv)->units); periph != NULL; 211 periph = TAILQ_NEXT(periph, unit_links)) { 212 if (xpt_path_comp(periph->path, path) == 0) { 213 splx(s); 214 return(periph); 215 } 216 } 217 splx(s); 218 if (name != NULL) 219 return(NULL); 220 } 221 return(NULL); 222 } 223 224 cam_status 225 cam_periph_acquire(struct cam_periph *periph) 226 { 227 int s; 228 229 if (periph == NULL) 230 return(CAM_REQ_CMP_ERR); 231 232 s = splsoftcam(); 233 periph->refcount++; 234 splx(s); 235 236 return(CAM_REQ_CMP); 237 } 238 239 void 240 cam_periph_release(struct cam_periph *periph) 241 { 242 int s; 243 244 if (periph == NULL) 245 return; 246 247 s = splsoftcam(); 248 if 
((--periph->refcount == 0) 249 && (periph->flags & CAM_PERIPH_INVALID)) { 250 camperiphfree(periph); 251 } 252 splx(s); 253 254 } 255 256 /* 257 * Look for the next unit number that is not currently in use for this 258 * peripheral type starting at "newunit". Also exclude unit numbers that 259 * are reserved by for future "hardwiring" unless we already know that this 260 * is a potential wired device. Only assume that the device is "wired" the 261 * first time through the loop since after that we'll be looking at unit 262 * numbers that did not match a wiring entry. 263 */ 264 static u_int 265 camperiphnextunit(struct periph_driver *p_drv, u_int newunit, int wired) 266 { 267 struct cam_periph *periph; 268 char *periph_name, *strval; 269 int s; 270 int i, val, dunit; 271 const char *dname; 272 273 s = splsoftcam(); 274 periph_name = p_drv->driver_name; 275 for (;;newunit++) { 276 277 for (periph = TAILQ_FIRST(&p_drv->units); 278 periph != NULL && periph->unit_number != newunit; 279 periph = TAILQ_NEXT(periph, unit_links)) 280 ; 281 282 if (periph != NULL && periph->unit_number == newunit) { 283 if (wired != 0) { 284 xpt_print_path(periph->path); 285 printf("Duplicate Wired Device entry!\n"); 286 xpt_print_path(periph->path); 287 printf("Second device will not be wired\n"); 288 wired = 0; 289 } 290 continue; 291 } 292 if (wired) 293 break; 294 295 /* 296 * Don't match entries like "da 4" as a wired down 297 * device, but do match entries like "da 4 target 5" 298 * or even "da 4 scbus 1". 
299 */ 300 i = -1; 301 while ((i = resource_locate(i, periph_name)) != -1) { 302 dname = resource_query_name(i); 303 dunit = resource_query_unit(i); 304 /* if no "target" and no specific scbus, skip */ 305 if (resource_int_value(dname, dunit, "target", &val) && 306 (resource_string_value(dname, dunit, "at",&strval)|| 307 strcmp(strval, "scbus") == 0)) 308 continue; 309 if (newunit == dunit) 310 break; 311 } 312 if (i == -1) 313 break; 314 } 315 splx(s); 316 return (newunit); 317 } 318 319 static u_int 320 camperiphunit(struct periph_driver *p_drv, path_id_t pathid, 321 target_id_t target, lun_id_t lun) 322 { 323 u_int unit; 324 int hit, i, val, dunit; 325 const char *dname; 326 char pathbuf[32], *strval, *periph_name; 327 328 unit = 0; 329 hit = 0; 330 331 periph_name = p_drv->driver_name; 332 snprintf(pathbuf, sizeof(pathbuf), "scbus%d", pathid); 333 i = -1; 334 while ((i = resource_locate(i, periph_name)) != -1) { 335 dname = resource_query_name(i); 336 dunit = resource_query_unit(i); 337 if (resource_string_value(dname, dunit, "at", &strval) == 0) { 338 if (strcmp(strval, pathbuf) != 0) 339 continue; 340 hit++; 341 } 342 if (resource_int_value(dname, dunit, "target", &val) == 0) { 343 if (val != target) 344 continue; 345 hit++; 346 } 347 if (resource_int_value(dname, dunit, "lun", &val) == 0) { 348 if (val != lun) 349 continue; 350 hit++; 351 } 352 if (hit != 0) { 353 unit = dunit; 354 break; 355 } 356 } 357 358 /* 359 * Either start from 0 looking for the next unit or from 360 * the unit number given in the resource config. This way, 361 * if we have wildcard matches, we don't return the same 362 * unit number twice. 363 */ 364 unit = camperiphnextunit(p_drv, unit, /*wired*/hit); 365 366 return (unit); 367 } 368 369 void 370 cam_periph_invalidate(struct cam_periph *periph) 371 { 372 int s; 373 374 s = splsoftcam(); 375 /* 376 * We only call this routine the first time a peripheral is 377 * invalidated. 
The oninvalidate() routine is always called at 378 * splsoftcam(). 379 */ 380 if (((periph->flags & CAM_PERIPH_INVALID) == 0) 381 && (periph->periph_oninval != NULL)) 382 periph->periph_oninval(periph); 383 384 periph->flags |= CAM_PERIPH_INVALID; 385 periph->flags &= ~CAM_PERIPH_NEW_DEV_FOUND; 386 387 if (periph->refcount == 0) 388 camperiphfree(periph); 389 else if (periph->refcount < 0) 390 printf("cam_invalidate_periph: refcount < 0!!\n"); 391 splx(s); 392 } 393 394 static void 395 camperiphfree(struct cam_periph *periph) 396 { 397 int s; 398 struct periph_driver **p_drv; 399 400 for (p_drv = (struct periph_driver **)periphdriver_set.ls_items; 401 *p_drv != NULL; p_drv++) { 402 if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0) 403 break; 404 } 405 406 if (periph->periph_dtor != NULL) 407 periph->periph_dtor(periph); 408 409 s = splsoftcam(); 410 TAILQ_REMOVE(&(*p_drv)->units, periph, unit_links); 411 (*p_drv)->generation++; 412 splx(s); 413 414 xpt_remove_periph(periph); 415 416 if (periph->flags & CAM_PERIPH_NEW_DEV_FOUND) { 417 union ccb ccb; 418 void *arg; 419 420 switch (periph->deferred_ac) { 421 case AC_FOUND_DEVICE: 422 ccb.ccb_h.func_code = XPT_GDEV_TYPE; 423 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1); 424 xpt_action(&ccb); 425 arg = &ccb; 426 break; 427 case AC_PATH_REGISTERED: 428 ccb.ccb_h.func_code = XPT_PATH_INQ; 429 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/ 1); 430 xpt_action(&ccb); 431 arg = &ccb; 432 break; 433 default: 434 arg = NULL; 435 break; 436 } 437 periph->deferred_callback(NULL, periph->deferred_ac, 438 periph->path, arg); 439 } 440 xpt_free_path(periph->path); 441 free(periph, M_DEVBUF); 442 } 443 444 /* 445 * Wait interruptibly for an exclusive lock. 
446 */ 447 int 448 cam_periph_lock(struct cam_periph *periph, int priority) 449 { 450 int error; 451 452 while ((periph->flags & CAM_PERIPH_LOCKED) != 0) { 453 periph->flags |= CAM_PERIPH_LOCK_WANTED; 454 if ((error = tsleep(periph, priority, "caplck", 0)) != 0) 455 return error; 456 } 457 458 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 459 return(ENXIO); 460 461 periph->flags |= CAM_PERIPH_LOCKED; 462 return 0; 463 } 464 465 /* 466 * Unlock and wake up any waiters. 467 */ 468 void 469 cam_periph_unlock(struct cam_periph *periph) 470 { 471 periph->flags &= ~CAM_PERIPH_LOCKED; 472 if ((periph->flags & CAM_PERIPH_LOCK_WANTED) != 0) { 473 periph->flags &= ~CAM_PERIPH_LOCK_WANTED; 474 wakeup(periph); 475 } 476 477 cam_periph_release(periph); 478 } 479 480 /* 481 * Map user virtual pointers into kernel virtual address space, so we can 482 * access the memory. This won't work on physical pointers, for now it's 483 * up to the caller to check for that. (XXX KDM -- should we do that here 484 * instead?) This also only works for up to MAXPHYS memory. Since we use 485 * buffers to map stuff in and out, we're limited to the buffer size. 
 */
int
cam_periph_mapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	int flags[CAM_PERIPH_MAXMAPS];
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];
	u_int32_t lengths[CAM_PERIPH_MAXMAPS];
	u_int32_t dirs[CAM_PERIPH_MAXMAPS];

	/*
	 * Decide how many user buffers this CCB references and record,
	 * for each one, where the pointer lives, how long it is, and
	 * which direction the data moves.
	 */
	switch(ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		if (ccb->cdm.match_buf_len == 0) {
			printf("cam_periph_mapmem: invalid match buffer "
			       "length 0\n");
			return(EINVAL);
		}
		if (ccb->cdm.pattern_buf_len > 0) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			lengths[0] = ccb->cdm.pattern_buf_len;
			dirs[0] = CAM_DIR_OUT;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
			lengths[1] = ccb->cdm.match_buf_len;
			dirs[1] = CAM_DIR_IN;
			numbufs = 2;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
			lengths[0] = ccb->cdm.match_buf_len;
			dirs[0] = CAM_DIR_IN;
			numbufs = 1;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		/* Nothing to map for a dataless transfer. */
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
			return(0);

		data_ptrs[0] = &ccb->csio.data_ptr;
		lengths[0] = ccb->csio.dxfer_len;
		dirs[0] = ccb->ccb_h.flags & CAM_DIR_MASK;
		numbufs = 1;
		break;
	default:
		return(EINVAL);
		break; /* NOTREACHED */
	}

	/*
	 * Check the transfer length and permissions first, so we don't
	 * have to unmap any previously mapped buffers.
	 */
	for (i = 0; i < numbufs; i++) {

		flags[i] = 0;

		/*
		 * The userland data pointer passed in may not be page
		 * aligned.  vmapbuf() truncates the address to a page
		 * boundary, so if the address isn't page aligned, we'll
		 * need enough space for the given transfer length, plus
		 * whatever extra space is necessary to make it to the page
		 * boundary.
		 */
		if ((lengths[i] +
		    (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)) > DFLTPHYS){
			printf("cam_periph_mapmem: attempt to map %lu bytes, "
			       "which is greater than DFLTPHYS(%d)\n",
			       (long)(lengths[i] +
			       (((vm_offset_t)(*data_ptrs[i])) & PAGE_MASK)),
			       DFLTPHYS);
			return(E2BIG);
		}

		/*
		 * CAM_DIR_OUT means the device reads from user memory,
		 * so the user region must be readable; CAM_DIR_IN means
		 * we write into it, so it must be writable.
		 */
		if (dirs[i] & CAM_DIR_OUT) {
			flags[i] = BIO_WRITE;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_READ)) {
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for READ\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);
				return(EACCES);
			}
		}

		if (dirs[i] & CAM_DIR_IN) {
			flags[i] = BIO_READ;
			if (!useracc(*data_ptrs[i], lengths[i],
				     VM_PROT_WRITE)) {
				printf("cam_periph_mapmem: error, "
					"address %p, length %lu isn't "
					"user accessible for WRITE\n",
					(void *)*data_ptrs[i],
					(u_long)lengths[i]);

				return(EACCES);
			}
		}

	}

	/* this keeps the current process from getting swapped */
	/*
	 * XXX KDM should I use P_NOSWAP instead?
	 */
	PHOLD(curproc);

	for (i = 0; i < numbufs; i++) {
		/*
		 * Get the buffer.
		 */
		mapinfo->bp[i] = getpbuf(NULL);

		/* save the buffer's data address */
		mapinfo->bp[i]->b_saveaddr = mapinfo->bp[i]->b_data;

		/* put our pointer in the data slot */
		mapinfo->bp[i]->b_data = *data_ptrs[i];

		/* set the transfer length, we know it's < DFLTPHYS */
		mapinfo->bp[i]->b_bufsize = lengths[i];

		/* set the flags */
		mapinfo->bp[i]->b_flags = B_PHYS;

		/* set the direction */
		mapinfo->bp[i]->b_iocmd = flags[i];

		/* map the buffer into kernel memory */
		vmapbuf(mapinfo->bp[i]);

		/* set our pointer to the new mapped area */
		*data_ptrs[i] = mapinfo->bp[i]->b_data;

		mapinfo->num_bufs_used++;
	}

	return(0);
}

/*
 * Unmap memory segments mapped into kernel virtual address space by
 * cam_periph_mapmem().
 */
void
cam_periph_unmapmem(union ccb *ccb, struct cam_periph_map_info *mapinfo)
{
	int numbufs, i;
	u_int8_t **data_ptrs[CAM_PERIPH_MAXMAPS];

	if (mapinfo->num_bufs_used <= 0) {
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
	}

	/* Recover the same pointer slots cam_periph_mapmem() filled in. */
	switch (ccb->ccb_h.func_code) {
	case XPT_DEV_MATCH:
		numbufs = min(mapinfo->num_bufs_used, 2);

		if (numbufs == 1) {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.matches;
		} else {
			data_ptrs[0] = (u_int8_t **)&ccb->cdm.patterns;
			data_ptrs[1] = (u_int8_t **)&ccb->cdm.matches;
		}
		break;
	case XPT_SCSI_IO:
	case XPT_CONT_TARGET_IO:
		data_ptrs[0] = &ccb->csio.data_ptr;
		numbufs = min(mapinfo->num_bufs_used, 1);
		break;
	default:
		/* allow ourselves to be swapped once again */
		PRELE(curproc);
		return;
		break; /* NOTREACHED */
	}

	for (i = 0; i < numbufs; i++) {
		/* Set the user's pointer back to the original value */
		*data_ptrs[i] = mapinfo->bp[i]->b_saveaddr;

		/* unmap the buffer */
		vunmapbuf(mapinfo->bp[i]);

		/* clear the flags we set above */
		mapinfo->bp[i]->b_flags &= ~B_PHYS;

		/* release the buffer */
		relpbuf(mapinfo->bp[i], NULL);
	}

	/* allow ourselves to be swapped once again */
	PRELE(curproc);
}

/*
 * Obtain a CCB for this peripheral at the given priority, sleeping
 * until one of at least that priority is delivered to the peripheral's
 * ccb_list by the transport layer.
 */
union ccb *
cam_periph_getccb(struct cam_periph *periph, u_int32_t priority)
{
	struct ccb_hdr *ccb_h;
	int s;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("entering cdgetccb\n"));

	s = splsoftcam();

	while (periph->ccb_list.slh_first == NULL) {
		if (periph->immediate_priority > priority)
			periph->immediate_priority = priority;
		xpt_schedule(periph, priority);
		/*
		 * xpt_schedule() may have delivered a CCB directly; only
		 * take it if it is at the priority we asked for.
		 */
		if ((periph->ccb_list.slh_first != NULL)
		 && (periph->ccb_list.slh_first->pinfo.priority == priority))
			break;
		tsleep(&periph->ccb_list, PRIBIO, "cgticb", 0);
	}

	ccb_h = periph->ccb_list.slh_first;
	SLIST_REMOVE_HEAD(&periph->ccb_list, periph_links.sle);
	splx(s);
	return ((union ccb *)ccb_h);
}

/*
 * Sleep until the given CCB has been dequeued and completed.  The
 * completion path is expected to wakeup() on &ccb->ccb_h.cbfcnp.
 */
void
cam_periph_ccbwait(union ccb *ccb)
{
	int s;

	s = splsoftcam();
	if ((ccb->ccb_h.pinfo.index != CAM_UNQUEUED_INDEX)
	 || ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG))
		tsleep(&ccb->ccb_h.cbfcnp, PRIBIO, "cbwait", 0);

	splx(s);
}

/*
 * Handle ioctl commands common to all peripheral drivers.  Currently
 * only CAMGETPASSTHRU is supported: locate the "pass" device attached
 * to the same underlying device and copy its GDEVLIST result to 'addr'.
 * Returns ENOTTY for unrecognized commands.
 */
int
cam_periph_ioctl(struct cam_periph *periph, int cmd, caddr_t addr,
		 int (*error_routine)(union ccb *ccb,
				      cam_flags camflags,
				      u_int32_t sense_flags))
{
	union ccb *ccb;
	int error;
	int found;

	error = found = 0;

	switch(cmd){
	case CAMGETPASSTHRU:
		ccb = cam_periph_getccb(periph, /* priority */ 1);
		xpt_setup_ccb(&ccb->ccb_h,
			      ccb->ccb_h.path,
			      /*priority*/1);
		ccb->ccb_h.func_code = XPT_GDEVLIST;

		/*
		 * Basically, the point of this is that we go through
		 * getting the list of devices, until we find a passthrough
		 * device.  In the current version of the CAM code, the
		 * only way to determine what type of device we're dealing
		 * with is by its name.
		 */
		while (found == 0) {
			ccb->cgdl.index = 0;
			ccb->cgdl.status = CAM_GDEVLIST_MORE_DEVS;
			while (ccb->cgdl.status == CAM_GDEVLIST_MORE_DEVS) {

				/* we want the next device in the list */
				xpt_action(ccb);
				if (strncmp(ccb->cgdl.periph_name, 
				    "pass", 4) == 0){
					found = 1;
					break;
				}
			}
			if ((ccb->cgdl.status == CAM_GDEVLIST_LAST_DEVICE) &&
			    (found == 0)) {
				/* No passthrough device: report empty name. */
				ccb->cgdl.periph_name[0] = '\0';
				ccb->cgdl.unit_number = 0;
				break;
			}
		}

		/* copy the result back out */	
		bcopy(ccb, addr, sizeof(union ccb));

		/* and release the ccb */
		xpt_release_ccb(ccb);

		break;
	default:
		error = ENOTTY;
		break;
	}
	return(error);
}

/*
 * Dispatch a CCB and wait for it to complete, re-issuing it as long as
 * the error routine asks for a retry (ERESTART).  Optionally records
 * devstat statistics for XPT_SCSI_IO CCBs and releases a frozen device
 * queue before returning.
 */
int
cam_periph_runccb(union ccb *ccb,
		  int (*error_routine)(union ccb *ccb,
				       cam_flags camflags,
				       u_int32_t sense_flags),
		  cam_flags camflags, u_int32_t sense_flags,
		  struct devstat *ds)
{
	int error;

	error = 0;

	/*
	 * If the user has supplied a stats structure, and if we understand
	 * this particular type of ccb, record the transaction start.
	 */
	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_start_transaction(ds);

	xpt_action(ccb);

	do {
		cam_periph_ccbwait(ccb);
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP)
			error = 0;
		else if (error_routine != NULL)
			error = (*error_routine)(ccb, camflags, sense_flags);
		else
			error = 0;

	} while (error == ERESTART);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 
		cam_release_devq(ccb->ccb_h.path,
				 /* relsim_flags */0,
				 /* openings */0,
				 /* timeout */0,
				 /* getcount_only */ FALSE);

	if ((ds != NULL) && (ccb->ccb_h.func_code == XPT_SCSI_IO))
		devstat_end_transaction(ds,
					ccb->csio.dxfer_len,
					ccb->csio.tag_action & 0xf,
					((ccb->ccb_h.flags & CAM_DIR_MASK) ==
					CAM_DIR_NONE) ?  DEVSTAT_NO_DATA : 
					(ccb->ccb_h.flags & CAM_DIR_OUT) ?
					DEVSTAT_WRITE : 
					DEVSTAT_READ);

	return(error);
}

/*
 * Freeze the SIM queue for 'path' by issuing a no-op CCB with
 * CAM_DEV_QFREEZE set.  Paired with cam_release_devq().
 */
void
cam_freeze_devq(struct cam_path *path)
{
	struct ccb_hdr ccb_h;

	xpt_setup_ccb(&ccb_h, path, /*priority*/1);
	ccb_h.func_code = XPT_NOOP;
	ccb_h.flags = CAM_DEV_QFREEZE;
	xpt_action((union ccb *)&ccb_h);
}

/*
 * Release a frozen SIM queue (or just query the freeze count when
 * getcount_only is set).  Returns the remaining freeze count.
 */
u_int32_t
cam_release_devq(struct cam_path *path, u_int32_t relsim_flags,
		 u_int32_t openings, u_int32_t timeout,
		 int getcount_only)
{
	struct ccb_relsim crs;

	xpt_setup_ccb(&crs.ccb_h, path,
		      /*priority*/1);
	crs.ccb_h.func_code = XPT_REL_SIMQ;
	crs.ccb_h.flags = getcount_only ? CAM_DEV_QFREEZE : 0;
	crs.release_flags = relsim_flags;
	crs.openings = openings;
	crs.release_timeout = timeout;
	xpt_action((union ccb *)&crs);
	return (crs.qfrozen_cnt);
}

#define saved_ccb_ptr ppriv_ptr0
/*
 * Completion handler for the error-recovery CCBs (TUR / START UNIT)
 * issued by cam_periph_error().  On success or final failure the
 * original CCB saved in saved_ccb_ptr is copied back over this one and
 * re-dispatched; on a retryable failure the recovery command itself is
 * re-issued.
 */
static void
camperiphdone(struct cam_periph *periph, union ccb *done_ccb)
{
	cam_status status;
	int frozen;
	int sense;
	struct scsi_start_stop_unit *scsi_cmd;
	u_int32_t relsim_flags, timeout;
	u_int32_t qfrozen_cnt;

	status = done_ccb->ccb_h.status;
	frozen = (status & CAM_DEV_QFRZN) != 0;
	sense = (status & CAM_AUTOSNS_VALID) != 0;
	status &= CAM_STATUS_MASK;

	timeout = 0;
	relsim_flags = 0;

	/*
	 * Unfreeze the queue once if it is already frozen.
	 */
	if (frozen != 0) {
		qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
					       /*relsim_flags*/0,
					       /*openings*/0,
					       /*timeout*/0,
					       /*getcount_only*/0);
	}

	switch (status) {

	case CAM_REQ_CMP:

		/*
		 * If we have successfully taken a device from the not
		 * ready to ready state, re-scan the device and re-get the
		 * inquiry information.  Many devices (mostly disks) don't
		 * properly report their inquiry information unless they
		 * are spun up.
		 */
		if (done_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_cmd = (struct scsi_start_stop_unit *)
					&done_ccb->csio.cdb_io.cdb_bytes;

			if (scsi_cmd->opcode == START_STOP_UNIT)
				xpt_async(AC_INQ_CHANGED,
					  done_ccb->ccb_h.path, NULL);
		}
		/* Restore the original CCB and send it on its way. */
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	case CAM_SCSI_STATUS_ERROR:
		scsi_cmd = (struct scsi_start_stop_unit *)
				&done_ccb->csio.cdb_io.cdb_bytes;
		if (sense != 0) {
			struct scsi_sense_data *sense;
			int error_code, sense_key, asc, ascq;

			sense = &done_ccb->csio.sense_data;
			scsi_extract_sense(sense, &error_code, 
					   &sense_key, &asc, &ascq);

			/*
			 * If the error is "invalid field in CDB", 
			 * and the load/eject flag is set, turn the 
			 * flag off and try again.  This is just in 
			 * case the drive in question barfs on the 
			 * load eject flag.  The CAM code should set 
			 * the load/eject flag by default for 
			 * removable media.
			 */

			/* XXX KDM 
			 * Should we check to see what the specific
			 * scsi status is??  Or does it not matter
			 * since we already know that there was an
			 * error, and we know what the specific
			 * error code was, and we know what the
			 * opcode is..
			 */
			if ((scsi_cmd->opcode == START_STOP_UNIT) &&
			    ((scsi_cmd->how & SSS_LOEJ) != 0) &&
			     (asc == 0x24) && (ascq == 0x00) &&
			     (done_ccb->ccb_h.retry_count > 0)) {

				scsi_cmd->how &= ~SSS_LOEJ;

				xpt_action(done_ccb);

			} else if (done_ccb->ccb_h.retry_count > 0) {
				/*
				 * In this case, the error recovery
				 * command failed, but we've got 
				 * some retries left on it.  Give
				 * it another try.
				 */

				/* set the timeout to .5 sec */
				relsim_flags =
					RELSIM_RELEASE_AFTER_TIMEOUT;
				timeout = 500;

				xpt_action(done_ccb);

				break;

			} else {
				/* 
				 * Copy the original CCB back and
				 * send it back to the caller.
				 */
				bcopy(done_ccb->ccb_h.saved_ccb_ptr,		
				      done_ccb, sizeof(union ccb));

				periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

				xpt_action(done_ccb);
			}
		} else {
			/* 
			 * Eh??  The command failed, but we don't
			 * have any sense.  What's up with that?
			 * Fire the CCB again to return it to the
			 * caller.
			 */
			bcopy(done_ccb->ccb_h.saved_ccb_ptr,
			      done_ccb, sizeof(union ccb));

			periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

			xpt_action(done_ccb);

		}
		break;
	default:
		bcopy(done_ccb->ccb_h.saved_ccb_ptr, done_ccb,
		      sizeof(union ccb));

		periph->flags &= ~CAM_PERIPH_RECOVERY_INPROG;

		xpt_action(done_ccb);

		break;
	}

	/* decrement the retry count */
	if (done_ccb->ccb_h.retry_count > 0)
		done_ccb->ccb_h.retry_count--;

	/*
	 * Release the queue; when a retry was scheduled above this also
	 * arms the .5 second release timeout.
	 */
	qfrozen_cnt = cam_release_devq(done_ccb->ccb_h.path,
				       /*relsim_flags*/relsim_flags,
				       /*openings*/0,
				       /*timeout*/timeout,
				       /*getcount_only*/0);
}

/*
 * Generic Async Event handler.  Peripheral drivers usually
 * filter out the events that require personal attention,
 * and leave the rest to this function.
 */
void
cam_periph_async(struct cam_periph *periph, u_int32_t code,
		 struct cam_path *path, void *arg)
{
	switch (code) {
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break; 
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		/* Let the device settle after a reset before new I/O. */
		cam_periph_bus_settle(periph, SCSI_DELAY);
		break;
	}
	default:
		break;
	}
}

/*
 * Freeze the peripheral's device queue for the bus settle delay,
 * measured from the time of the last reset reported by the transport.
 */
void
cam_periph_bus_settle(struct cam_periph *periph, u_int bus_settle)
{
	struct ccb_getdevstats cgds;

	xpt_setup_ccb(&cgds.ccb_h, periph->path, /*priority*/1);
	cgds.ccb_h.func_code = XPT_GDEV_STATS;
	xpt_action((union ccb *)&cgds);
	cam_periph_freeze_after_event(periph, &cgds.last_reset, bus_settle);
}

/*
 * If less than duration_ms has elapsed since *event_time, freeze the
 * device queue and schedule its release for the remainder of the
 * duration.  Otherwise do nothing.
 */
void
cam_periph_freeze_after_event(struct cam_periph *periph,
			      struct timeval* event_time, u_int duration_ms)
{
	struct timeval delta;
	struct timeval duration_tv;
	int s;

	s = splclock();
	microtime(&delta);
	splx(s);
	timevalsub(&delta, event_time);
	duration_tv.tv_sec = duration_ms / 1000;
	duration_tv.tv_usec = (duration_ms % 1000) * 1000;
	if (timevalcmp(&delta, &duration_tv, <)) {
		timevalsub(&duration_tv, &delta);

		duration_ms = duration_tv.tv_sec * 1000;
		duration_ms += duration_tv.tv_usec / 1000;
		cam_freeze_devq(periph->path); 
		cam_release_devq(periph->path,
				RELSIM_RELEASE_AFTER_TIMEOUT,
				/*reduction*/0,
				/*timeout*/duration_ms,
				/*getcount_only*/0);
	}

}

/*
 * Generic error handler.  Peripheral drivers usually filter
 * out the errors that they handle in a unique manner, then
 * call this function.
1096 */ 1097 int 1098 cam_periph_error(union ccb *ccb, cam_flags camflags, 1099 u_int32_t sense_flags, union ccb *save_ccb) 1100 { 1101 cam_status status; 1102 int frozen; 1103 int sense; 1104 int error; 1105 int openings; 1106 int retry; 1107 u_int32_t relsim_flags; 1108 u_int32_t timeout; 1109 1110 status = ccb->ccb_h.status; 1111 frozen = (status & CAM_DEV_QFRZN) != 0; 1112 sense = (status & CAM_AUTOSNS_VALID) != 0; 1113 status &= CAM_STATUS_MASK; 1114 relsim_flags = 0; 1115 1116 switch (status) { 1117 case CAM_REQ_CMP: 1118 /* decrement the number of retries */ 1119 retry = ccb->ccb_h.retry_count > 0; 1120 if (retry) 1121 ccb->ccb_h.retry_count--; 1122 error = 0; 1123 break; 1124 case CAM_AUTOSENSE_FAIL: 1125 case CAM_SCSI_STATUS_ERROR: 1126 1127 switch (ccb->csio.scsi_status) { 1128 case SCSI_STATUS_OK: 1129 case SCSI_STATUS_COND_MET: 1130 case SCSI_STATUS_INTERMED: 1131 case SCSI_STATUS_INTERMED_COND_MET: 1132 error = 0; 1133 break; 1134 case SCSI_STATUS_CMD_TERMINATED: 1135 case SCSI_STATUS_CHECK_COND: 1136 if (sense != 0) { 1137 struct scsi_sense_data *sense; 1138 int error_code, sense_key, asc, ascq; 1139 struct cam_periph *periph; 1140 scsi_sense_action err_action; 1141 struct ccb_getdev cgd; 1142 1143 sense = &ccb->csio.sense_data; 1144 scsi_extract_sense(sense, &error_code, 1145 &sense_key, &asc, &ascq); 1146 periph = xpt_path_periph(ccb->ccb_h.path); 1147 1148 /* 1149 * Grab the inquiry data for this device. 1150 */ 1151 xpt_setup_ccb(&cgd.ccb_h, ccb->ccb_h.path, 1152 /*priority*/ 1); 1153 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1154 xpt_action((union ccb *)&cgd); 1155 1156 err_action = scsi_error_action(asc, ascq, 1157 &cgd.inq_data); 1158 1159 /* 1160 * Send a Test Unit Ready to the device. 1161 * If the 'many' flag is set, we send 120 1162 * test unit ready commands, one every half 1163 * second. Otherwise, we just send one TUR. 1164 * We only want to do this if the retry 1165 * count has not been exhausted. 
1166 */ 1167 if (((err_action & SS_MASK) == SS_TUR) 1168 && save_ccb != NULL 1169 && ccb->ccb_h.retry_count > 0) { 1170 1171 /* 1172 * Since error recovery is already 1173 * in progress, don't attempt to 1174 * process this error. It is probably 1175 * related to the error that caused 1176 * the currently active error recovery 1177 * action. Also, we only have 1178 * space for one saved CCB, so if we 1179 * had two concurrent error recovery 1180 * actions, we would end up 1181 * over-writing one error recovery 1182 * CCB with another one. 1183 */ 1184 if (periph->flags & 1185 CAM_PERIPH_RECOVERY_INPROG) { 1186 error = ERESTART; 1187 break; 1188 } 1189 1190 periph->flags |= 1191 CAM_PERIPH_RECOVERY_INPROG; 1192 1193 /* decrement the number of retries */ 1194 if ((err_action & 1195 SSQ_DECREMENT_COUNT) != 0) { 1196 retry = 1; 1197 ccb->ccb_h.retry_count--; 1198 } 1199 1200 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1201 1202 /* 1203 * We retry this one every half 1204 * second for a minute. If the 1205 * device hasn't become ready in a 1206 * minute's time, it's unlikely to 1207 * ever become ready. If the table 1208 * doesn't specify SSQ_MANY, we can 1209 * only try this once. Oh well. 1210 */ 1211 if ((err_action & SSQ_MANY) != 0) 1212 scsi_test_unit_ready(&ccb->csio, 1213 /*retries*/120, 1214 camperiphdone, 1215 MSG_SIMPLE_Q_TAG, 1216 SSD_FULL_SIZE, 1217 /*timeout*/5000); 1218 else 1219 scsi_test_unit_ready(&ccb->csio, 1220 /*retries*/1, 1221 camperiphdone, 1222 MSG_SIMPLE_Q_TAG, 1223 SSD_FULL_SIZE, 1224 /*timeout*/5000); 1225 1226 /* release the queue after .5 sec. */ 1227 relsim_flags = 1228 RELSIM_RELEASE_AFTER_TIMEOUT; 1229 timeout = 500; 1230 /* 1231 * Drop the priority to 0 so that 1232 * we are the first to execute. Also 1233 * freeze the queue after this command 1234 * is sent so that we can restore the 1235 * old csio and have it queued in the 1236 * proper order before we let normal 1237 * transactions go to the drive. 
1238 */ 1239 ccb->ccb_h.pinfo.priority = 0; 1240 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1241 1242 /* 1243 * Save a pointer to the original 1244 * CCB in the new CCB. 1245 */ 1246 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1247 1248 error = ERESTART; 1249 } 1250 /* 1251 * Send a start unit command to the device, 1252 * and then retry the command. We only 1253 * want to do this if the retry count has 1254 * not been exhausted. If the user 1255 * specified 0 retries, then we follow 1256 * their request and do not retry. 1257 */ 1258 else if (((err_action & SS_MASK) == SS_START) 1259 && save_ccb != NULL 1260 && ccb->ccb_h.retry_count > 0) { 1261 int le; 1262 1263 /* 1264 * Only one error recovery action 1265 * at a time. See above. 1266 */ 1267 if (periph->flags & 1268 CAM_PERIPH_RECOVERY_INPROG) { 1269 error = ERESTART; 1270 break; 1271 } 1272 1273 periph->flags |= 1274 CAM_PERIPH_RECOVERY_INPROG; 1275 1276 /* decrement the number of retries */ 1277 retry = 1; 1278 ccb->ccb_h.retry_count--; 1279 1280 /* 1281 * Check for removable media and 1282 * set load/eject flag 1283 * appropriately. 1284 */ 1285 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1286 le = TRUE; 1287 else 1288 le = FALSE; 1289 1290 /* 1291 * Attempt to start the drive up. 1292 * 1293 * Save the current ccb so it can 1294 * be restored and retried once the 1295 * drive is started up. 1296 */ 1297 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1298 1299 scsi_start_stop(&ccb->csio, 1300 /*retries*/1, 1301 camperiphdone, 1302 MSG_SIMPLE_Q_TAG, 1303 /*start*/TRUE, 1304 /*load/eject*/le, 1305 /*immediate*/FALSE, 1306 SSD_FULL_SIZE, 1307 /*timeout*/50000); 1308 /* 1309 * Drop the priority to 0 so that 1310 * we are the first to execute. Also 1311 * freeze the queue after this command 1312 * is sent so that we can restore the 1313 * old csio and have it queued in the 1314 * proper order before we let normal 1315 * transactions go to the drive. 
1316 */ 1317 ccb->ccb_h.pinfo.priority = 0; 1318 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1319 1320 /* 1321 * Save a pointer to the original 1322 * CCB in the new CCB. 1323 */ 1324 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1325 1326 error = ERESTART; 1327 } else if ((sense_flags & SF_RETRY_UA) != 0) { 1328 /* 1329 * XXX KDM this is a *horrible* 1330 * hack. 1331 */ 1332 error = scsi_interpret_sense(ccb, 1333 sense_flags, 1334 &relsim_flags, 1335 &openings, 1336 &timeout, 1337 err_action); 1338 } 1339 1340 /* 1341 * Theoretically, this code should send a 1342 * test unit ready to the given device, and 1343 * if it returns and error, send a start 1344 * unit command. Since we don't yet have 1345 * the capability to do two-command error 1346 * recovery, just send a start unit. 1347 * XXX KDM fix this! 1348 */ 1349 else if (((err_action & SS_MASK) == SS_TURSTART) 1350 && save_ccb != NULL 1351 && ccb->ccb_h.retry_count > 0) { 1352 int le; 1353 1354 /* 1355 * Only one error recovery action 1356 * at a time. See above. 1357 */ 1358 if (periph->flags & 1359 CAM_PERIPH_RECOVERY_INPROG) { 1360 error = ERESTART; 1361 break; 1362 } 1363 1364 periph->flags |= 1365 CAM_PERIPH_RECOVERY_INPROG; 1366 1367 /* decrement the number of retries */ 1368 retry = 1; 1369 ccb->ccb_h.retry_count--; 1370 1371 /* 1372 * Check for removable media and 1373 * set load/eject flag 1374 * appropriately. 1375 */ 1376 if (SID_IS_REMOVABLE(&cgd.inq_data)) 1377 le = TRUE; 1378 else 1379 le = FALSE; 1380 1381 /* 1382 * Attempt to start the drive up. 1383 * 1384 * Save the current ccb so it can 1385 * be restored and retried once the 1386 * drive is started up. 1387 */ 1388 bcopy(ccb, save_ccb, sizeof(*save_ccb)); 1389 1390 scsi_start_stop(&ccb->csio, 1391 /*retries*/1, 1392 camperiphdone, 1393 MSG_SIMPLE_Q_TAG, 1394 /*start*/TRUE, 1395 /*load/eject*/le, 1396 /*immediate*/FALSE, 1397 SSD_FULL_SIZE, 1398 /*timeout*/50000); 1399 1400 /* release the queue after .5 sec. 
*/ 1401 relsim_flags = 1402 RELSIM_RELEASE_AFTER_TIMEOUT; 1403 timeout = 500; 1404 /* 1405 * Drop the priority to 0 so that 1406 * we are the first to execute. Also 1407 * freeze the queue after this command 1408 * is sent so that we can restore the 1409 * old csio and have it queued in the 1410 * proper order before we let normal 1411 * transactions go to the drive. 1412 */ 1413 ccb->ccb_h.pinfo.priority = 0; 1414 ccb->ccb_h.flags |= CAM_DEV_QFREEZE; 1415 1416 /* 1417 * Save a pointer to the original 1418 * CCB in the new CCB. 1419 */ 1420 ccb->ccb_h.saved_ccb_ptr = save_ccb; 1421 1422 error = ERESTART; 1423 } else { 1424 error = scsi_interpret_sense(ccb, 1425 sense_flags, 1426 &relsim_flags, 1427 &openings, 1428 &timeout, 1429 err_action); 1430 } 1431 } else if (ccb->csio.scsi_status == 1432 SCSI_STATUS_CHECK_COND 1433 && status != CAM_AUTOSENSE_FAIL) { 1434 /* no point in decrementing the retry count */ 1435 panic("cam_periph_error: scsi status of " 1436 "CHECK COND returned but no sense " 1437 "information is availible. " 1438 "Controller should have returned " 1439 "CAM_AUTOSENSE_FAILED"); 1440 /* NOTREACHED */ 1441 error = EIO; 1442 } else if (ccb->ccb_h.retry_count == 0) { 1443 /* 1444 * XXX KDM shouldn't there be a better 1445 * argument to return?? 1446 */ 1447 error = EIO; 1448 } else { 1449 /* decrement the number of retries */ 1450 retry = ccb->ccb_h.retry_count > 0; 1451 if (retry) 1452 ccb->ccb_h.retry_count--; 1453 /* 1454 * If it was aborted with no 1455 * clue as to the reason, just 1456 * retry it again. 1457 */ 1458 error = ERESTART; 1459 } 1460 break; 1461 case SCSI_STATUS_QUEUE_FULL: 1462 { 1463 /* no decrement */ 1464 struct ccb_getdevstats cgds; 1465 1466 /* 1467 * First off, find out what the current 1468 * transaction counts are. 
1469 */ 1470 xpt_setup_ccb(&cgds.ccb_h, 1471 ccb->ccb_h.path, 1472 /*priority*/1); 1473 cgds.ccb_h.func_code = XPT_GDEV_STATS; 1474 xpt_action((union ccb *)&cgds); 1475 1476 /* 1477 * If we were the only transaction active, treat 1478 * the QUEUE FULL as if it were a BUSY condition. 1479 */ 1480 if (cgds.dev_active != 0) { 1481 int total_openings; 1482 1483 /* 1484 * Reduce the number of openings to 1485 * be 1 less than the amount it took 1486 * to get a queue full bounded by the 1487 * minimum allowed tag count for this 1488 * device. 1489 */ 1490 total_openings = 1491 cgds.dev_active+cgds.dev_openings; 1492 openings = cgds.dev_active; 1493 if (openings < cgds.mintags) 1494 openings = cgds.mintags; 1495 if (openings < total_openings) 1496 relsim_flags = RELSIM_ADJUST_OPENINGS; 1497 else { 1498 /* 1499 * Some devices report queue full for 1500 * temporary resource shortages. For 1501 * this reason, we allow a minimum 1502 * tag count to be entered via a 1503 * quirk entry to prevent the queue 1504 * count on these devices from falling 1505 * to a pessimisticly low value. We 1506 * still wait for the next successful 1507 * completion, however, before queueing 1508 * more transactions to the device. 1509 */ 1510 relsim_flags = 1511 RELSIM_RELEASE_AFTER_CMDCMPLT; 1512 } 1513 timeout = 0; 1514 error = ERESTART; 1515 break; 1516 } 1517 /* FALLTHROUGH */ 1518 } 1519 case SCSI_STATUS_BUSY: 1520 /* 1521 * Restart the queue after either another 1522 * command completes or a 1 second timeout. 1523 * If we have any retries left, that is. 
1524 */ 1525 retry = ccb->ccb_h.retry_count > 0; 1526 if (retry) { 1527 ccb->ccb_h.retry_count--; 1528 error = ERESTART; 1529 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT 1530 | RELSIM_RELEASE_AFTER_CMDCMPLT; 1531 timeout = 1000; 1532 } else { 1533 error = EIO; 1534 } 1535 break; 1536 case SCSI_STATUS_RESERV_CONFLICT: 1537 error = EIO; 1538 break; 1539 default: 1540 error = EIO; 1541 break; 1542 } 1543 break; 1544 case CAM_REQ_CMP_ERR: 1545 case CAM_CMD_TIMEOUT: 1546 case CAM_UNEXP_BUSFREE: 1547 case CAM_UNCOR_PARITY: 1548 case CAM_DATA_RUN_ERR: 1549 /* decrement the number of retries */ 1550 retry = ccb->ccb_h.retry_count > 0; 1551 if (retry) { 1552 ccb->ccb_h.retry_count--; 1553 error = ERESTART; 1554 } else { 1555 error = EIO; 1556 } 1557 break; 1558 case CAM_UA_ABORT: 1559 case CAM_UA_TERMIO: 1560 case CAM_MSG_REJECT_REC: 1561 /* XXX Don't know that these are correct */ 1562 error = EIO; 1563 break; 1564 case CAM_SEL_TIMEOUT: 1565 { 1566 /* 1567 * XXX 1568 * A single selection timeout should not be enough 1569 * to invalidate a device. We should retry for multiple 1570 * seconds assuming this isn't a probe. We'll probably 1571 * need a special flag for that. 1572 */ 1573 #if 0 1574 struct cam_path *newpath; 1575 1576 /* Should we do more if we can't create the path?? */ 1577 if (xpt_create_path(&newpath, xpt_path_periph(ccb->ccb_h.path), 1578 xpt_path_path_id(ccb->ccb_h.path), 1579 xpt_path_target_id(ccb->ccb_h.path), 1580 CAM_LUN_WILDCARD) != CAM_REQ_CMP) 1581 break; 1582 /* 1583 * Let peripheral drivers know that this device has gone 1584 * away. 1585 */ 1586 xpt_async(AC_LOST_DEVICE, newpath, NULL); 1587 xpt_free_path(newpath); 1588 #endif 1589 if ((sense_flags & SF_RETRY_SELTO) != 0) { 1590 retry = ccb->ccb_h.retry_count > 0; 1591 if (retry) { 1592 ccb->ccb_h.retry_count--; 1593 error = ERESTART; 1594 /* 1595 * Wait half a second to give the device 1596 * time to recover before we try again. 
1597 */ 1598 relsim_flags = RELSIM_RELEASE_AFTER_TIMEOUT; 1599 timeout = 500; 1600 } else { 1601 error = ENXIO; 1602 } 1603 } else { 1604 error = ENXIO; 1605 } 1606 break; 1607 } 1608 case CAM_REQ_INVALID: 1609 case CAM_PATH_INVALID: 1610 case CAM_DEV_NOT_THERE: 1611 case CAM_NO_HBA: 1612 case CAM_PROVIDE_FAIL: 1613 case CAM_REQ_TOO_BIG: 1614 error = EINVAL; 1615 break; 1616 case CAM_SCSI_BUS_RESET: 1617 case CAM_BDR_SENT: 1618 case CAM_REQUEUE_REQ: 1619 /* Unconditional requeue, dammit */ 1620 error = ERESTART; 1621 break; 1622 case CAM_RESRC_UNAVAIL: 1623 case CAM_BUSY: 1624 /* timeout??? */ 1625 default: 1626 /* decrement the number of retries */ 1627 retry = ccb->ccb_h.retry_count > 0; 1628 if (retry) { 1629 ccb->ccb_h.retry_count--; 1630 error = ERESTART; 1631 } else { 1632 /* Check the sense codes */ 1633 error = EIO; 1634 } 1635 break; 1636 } 1637 1638 /* Attempt a retry */ 1639 if (error == ERESTART || error == 0) { 1640 if (frozen != 0) 1641 ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 1642 1643 if (error == ERESTART) 1644 xpt_action(ccb); 1645 1646 if (frozen != 0) { 1647 cam_release_devq(ccb->ccb_h.path, 1648 relsim_flags, 1649 openings, 1650 timeout, 1651 /*getcount_only*/0); 1652 } 1653 } 1654 1655 1656 return (error); 1657 } 1658