1 /* 2 * Implementation of SCSI Sequential Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 * 28 * $Id$ 29 */ 30 31 #include <sys/param.h> 32 #include <sys/queue.h> 33 #ifdef KERNEL 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #endif 37 #include <sys/types.h> 38 #include <sys/buf.h> 39 #include <sys/malloc.h> 40 #include <sys/mtio.h> 41 #include <sys/conf.h> 42 #include <sys/buf.h> 43 #include <sys/devicestat.h> 44 #include <machine/limits.h> 45 46 #ifndef KERNEL 47 #include <stdio.h> 48 #include <string.h> 49 #endif 50 51 #include <cam/cam.h> 52 #include <cam/cam_ccb.h> 53 #include <cam/cam_extend.h> 54 #include <cam/cam_periph.h> 55 #include <cam/cam_xpt_periph.h> 56 #include <cam/cam_debug.h> 57 58 #include <cam/scsi/scsi_all.h> 59 #include <cam/scsi/scsi_message.h> 60 #include <cam/scsi/scsi_sa.h> 61 62 #ifdef KERNEL 63 64 #define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4) /* 4 bit unit. */ 65 #define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4)) 66 67 typedef enum { 68 SA_STATE_NORMAL 69 } sa_state; 70 71 typedef enum { 72 SA_CCB_BUFFER_IO, 73 SA_CCB_WAITING 74 } sa_ccb_types; 75 76 #define ccb_type ppriv_field0 77 #define ccb_bp ppriv_ptr1 78 79 typedef enum { 80 SA_FLAG_OPEN = 0x0001, 81 SA_FLAG_FIXED = 0x0002, 82 SA_FLAG_TAPE_LOCKED = 0x0004, 83 SA_FLAG_TAPE_MOUNTED = 0x0008, 84 SA_FLAG_TAPE_WP = 0x0010, 85 SA_FLAG_TAPE_WRITTEN = 0x0020, 86 SA_FLAG_2FM_AT_EOD = 0x0040, 87 SA_FLAG_EOM_PENDING = 0x0080, 88 SA_FLAG_EIO_PENDING = 0x0100, 89 SA_FLAG_EOF_PENDING = 0x0200, 90 SA_FLAG_ERR_PENDING = (SA_FLAG_EOM_PENDING|SA_FLAG_EIO_PENDING| 91 SA_FLAG_EOF_PENDING), 92 SA_FLAG_INVALID = 0x0400, 93 SA_FLAG_COMP_ENABLED = 0x0800, 94 SA_FLAG_COMP_UNSUPP = 0x1000 95 } sa_flags; 96 97 typedef enum { 98 SA_MODE_REWIND = 0x00, 99 SA_MODE_NOREWIND = 0x01, 100 SA_MODE_OFFLINE = 0x02 101 } sa_mode; 102 103 typedef enum { 104 SA_PARAM_NONE = 0x00, 105 SA_PARAM_BLOCKSIZE = 0x01, 106 SA_PARAM_DENSITY = 0x02, 107 SA_PARAM_COMPRESSION = 0x04, 108 SA_PARAM_BUFF_MODE = 0x08, 109 SA_PARAM_NUMBLOCKS = 0x10, 110 SA_PARAM_WP = 0x20, 111 SA_PARAM_SPEED = 0x40, 112 
	SA_PARAM_ALL		= 0x7f
} sa_params;

/* Device quirks matched against INQUIRY data in saregister(). */
typedef enum {
	SA_QUIRK_NONE		= 0x00,
	SA_QUIRK_NOCOMP		= 0x01	/* drive chokes on the compression page */
} sa_quirks;

/*
 * Per-unit driver state.  blk_* describe the legal transfer sizes
 * learned from READ BLOCK LIMITS; media_* are refreshed by samount().
 */
struct sa_softc {
	sa_state	state;
	sa_flags	flags;
	sa_quirks	quirks;
	struct		buf_queue_head buf_queue;	/* pending user I/O */
	struct		devstat device_stats;
	int		blk_gran;	/* granularity from READ BLOCK LIMITS */
	int		blk_mask;	/* alignment mask, ~0 if non-power-of-2 */
	int		blk_shift;	/* log2(min_blk) when power of 2 */
	u_int32_t	max_blk;
	u_int32_t	min_blk;
	u_int8_t	media_density;
	u_int32_t	media_blksize;
	u_int32_t	media_numblks;
	u_int32_t	comp_algorithm;
	u_int32_t	saved_comp_algorithm;
	u_int8_t	speed;
	int		buffer_mode;
	int		filemarks;	/* filemarks written since last write */
	union		ccb saved_ccb;
};

struct sa_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	sa_quirks quirks;
};

static struct sa_quirk_entry sa_quirk_table[] =
{
	{
		{ T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "ARCHIVE",
		  "Python 25601*", "*"}, /*quirks*/SA_QUIRK_NOCOMP
	}
};

static	d_open_t	saopen;
static	d_read_t	saread;
static	d_write_t	sawrite;
static	d_close_t	saclose;
static	d_strategy_t	sastrategy;
static	d_ioctl_t	saioctl;
static	periph_init_t	sainit;
static	periph_ctor_t	saregister;
static	periph_dtor_t	sacleanup;
static	periph_start_t	sastart;
static	void		saasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		sadone(struct cam_periph *periph,
			       union ccb *start_ccb);
static	int		saerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	int		sacheckeod(struct cam_periph *periph);
static	int		sagetparams(struct cam_periph *periph,
				    sa_params params_to_get,
				    u_int32_t *blocksize, u_int8_t *density,
				    u_int32_t *numblocks, int *buff_mode,
				    u_int8_t *write_protect, u_int8_t *speed,
				    int *comp_supported, int *comp_enabled,
				    u_int32_t *comp_algorithm,
				    struct scsi_data_compression_page *comp_page);
static	int		sasetparams(struct cam_periph *periph,
				    sa_params params_to_set,
u_int32_t blocksize, u_int8_t density, 183 u_int32_t comp_algorithm); 184 static void saprevent(struct cam_periph *periph, int action); 185 static int sarewind(struct cam_periph *periph); 186 static int saspace(struct cam_periph *periph, int count, 187 scsi_space_code code); 188 static int samount(struct cam_periph *periph); 189 static int saretension(struct cam_periph *periph); 190 static int sareservereleaseunit(struct cam_periph *periph, 191 int reserve); 192 static int saloadunload(struct cam_periph *periph, int load); 193 static int saerase(struct cam_periph *periph, int longerase); 194 static int sawritefilemarks(struct cam_periph *periph, 195 int nmarks, int setmarks); 196 197 static struct periph_driver sadriver = 198 { 199 sainit, "sa", 200 TAILQ_HEAD_INITIALIZER(sadriver.units), /* generation */ 0 201 }; 202 203 DATA_SET(periphdriver_set, sadriver); 204 205 #define SAUNIT(DEV) ((minor(DEV)&0xF0) >> 4) /* 4 bit unit. */ 206 #define SASETUNIT(DEV, U) makedev(major(DEV), ((U) << 4)) 207 208 #define SAMODE(z) ((minor(z) & 0x03)) 209 #define SADENSITY(z) (((minor(z) >> 2) & 0x03)) 210 211 /* For 2.2-stable support */ 212 #ifndef D_TAPE 213 #define D_TAPE 0 214 #endif 215 216 #define CTLMODE 3 217 #define SA_CDEV_MAJOR 14 218 #define SA_BDEV_MAJOR 5 219 220 static struct cdevsw sa_cdevsw = 221 { 222 /*d_open*/ saopen, 223 /*d_close*/ saclose, 224 /*d_read*/ saread, 225 /*d_write*/ sawrite, 226 /*d_ioctl*/ saioctl, 227 /*d_stop*/ nostop, 228 /*d_reset*/ noreset, 229 /*d_devtotty*/ nodevtotty, 230 /*d_poll*/ seltrue, 231 /*d_mmap*/ nommap, 232 /*d_strategy*/ sastrategy, 233 /*d_name*/ "sa", 234 /*d_spare*/ NULL, 235 /*d_maj*/ -1, 236 /*d_dump*/ nodump, 237 /*d_psize*/ nopsize, 238 /*d_flags*/ D_TAPE, 239 /*d_maxio*/ 0, 240 /*b_maj*/ -1 241 }; 242 243 static struct extend_array *saperiphs; 244 245 static int 246 saopen(dev_t dev, int flags, int fmt, struct proc *p) 247 { 248 struct cam_periph *periph; 249 struct sa_softc *softc; 250 int unit; 251 int mode; 252 int 
density; 253 int error; 254 255 unit = SAUNIT(dev); 256 mode = SAMODE(dev); 257 density = SADENSITY(dev); 258 259 periph = cam_extend_get(saperiphs, unit); 260 if (periph == NULL) 261 return (ENXIO); 262 263 softc = (struct sa_softc *)periph->softc; 264 265 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 266 ("saaopen: dev=0x%x (unit %d , mode %d, density %d)\n", dev, 267 unit, mode, density)); 268 269 if (softc->flags & SA_FLAG_INVALID) 270 return(ENXIO); 271 272 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) { 273 return (error); /* error code from tsleep */ 274 } 275 276 if ((softc->flags & SA_FLAG_OPEN) == 0) { 277 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 278 return(ENXIO); 279 280 if ((error = sareservereleaseunit(periph, TRUE)) != 0) { 281 cam_periph_unlock(periph); 282 cam_periph_release(periph); 283 return(error); 284 } 285 } 286 287 if (error == 0) { 288 if ((softc->flags & SA_FLAG_OPEN) != 0) { 289 error = EBUSY; 290 } 291 292 if (error == 0) { 293 error = samount(periph); 294 } 295 /* Perform other checking... 
*/ 296 } 297 298 if (error == 0) { 299 saprevent(periph, PR_PREVENT); 300 softc->flags |= SA_FLAG_OPEN; 301 } 302 303 cam_periph_unlock(periph); 304 return (error); 305 } 306 307 static int 308 saclose(dev_t dev, int flag, int fmt, struct proc *p) 309 { 310 struct cam_periph *periph; 311 struct sa_softc *softc; 312 int unit; 313 int mode; 314 int error; 315 316 unit = SAUNIT(dev); 317 mode = SAMODE(dev); 318 periph = cam_extend_get(saperiphs, unit); 319 if (periph == NULL) 320 return (ENXIO); 321 322 softc = (struct sa_softc *)periph->softc; 323 324 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) { 325 return (error); /* error code from tsleep */ 326 } 327 328 sacheckeod(periph); 329 330 saprevent(periph, PR_ALLOW); 331 332 switch (mode) { 333 case SA_MODE_REWIND: 334 sarewind(periph); 335 break; 336 case SA_MODE_OFFLINE: 337 sarewind(periph); 338 saloadunload(periph, /*load*/FALSE); 339 break; 340 case SA_MODE_NOREWIND: 341 default: 342 break; 343 } 344 345 softc->flags &= ~SA_FLAG_OPEN; 346 347 /* release the device */ 348 sareservereleaseunit(periph, FALSE); 349 350 cam_periph_unlock(periph); 351 cam_periph_release(periph); 352 353 return (0); 354 } 355 356 static int 357 saread(dev_t dev, struct uio *uio, int ioflag) 358 { 359 return(physio(sastrategy, NULL, dev, 1, minphys, uio)); 360 } 361 362 static int 363 sawrite(dev_t dev, struct uio *uio, int ioflag) 364 { 365 return(physio(sastrategy, NULL, dev, 0, minphys, uio)); 366 } 367 368 /* 369 * Actually translate the requested transfer into one the physical driver 370 * can understand. The transfer is described by a buf and will include 371 * only one physical transfer. 
 */
static void
sastrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct sa_softc *softc;
	u_int  unit;
	int    s;

	unit = SAUNIT(bp->b_dev);
	periph = cam_extend_get(saperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct sa_softc *)periph->softc;

	/*
	 * If it's a null transfer, return immediatly
	 */
	if (bp->b_bcount == 0)
		goto done;

	/* valid request? */
	if (softc->flags & SA_FLAG_FIXED) {
		/*
		 * Fixed block device.  The byte count must
		 * be a multiple of our block size.
		 */
		/*
		 * blk_mask == ~0 means min_blk is not a power of two, so
		 * fall back to a modulo check instead of a mask test.
		 */
		if (((softc->blk_mask != ~0)
		  && ((bp->b_bcount & softc->blk_mask) != 0))
		 || ((softc->blk_mask == ~0)
		  && ((bp->b_bcount % softc->min_blk) != 0))) {
			xpt_print_path(periph->path);
			printf("Invalid request.  Fixed block device "
			       "requests must be a multiple "
			       "of %d bytes\n", softc->min_blk);
			bp->b_error = EINVAL;
			goto bad;
		}
	} else if ((bp->b_bcount > softc->max_blk)
		|| (bp->b_bcount < softc->min_blk)
		|| (bp->b_bcount & softc->blk_mask) != 0) {

		xpt_print_path(periph->path);
		printf("Invalid request.  Variable block device "
		       "requests must be ");
		if (softc->blk_mask != 0) {
			printf("a multiple of %d ",
			       (0x1 << softc->blk_gran));
		}
		printf("between %d and %d bytes\n",
		       softc->min_blk, softc->max_blk);
		bp->b_error = EINVAL;
		goto bad;
	}

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bufq_insert_tail(&softc->buf_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;
done:

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * Handle tape ioctls: status query (MTIOCGET), tape motion and
 * parameter-setting operations (MTIOCTOP), and EOT convention
 * selectors.  Anything unrecognized is passed to cam_periph_ioctl().
 */
static int
saioctl(dev_t dev, u_long cmd, caddr_t arg, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct sa_softc *softc;
	int unit;
	int mode;
	int density;
	int error;

	unit = SAUNIT(dev);
	mode = SAMODE(dev);
	density = SADENSITY(dev);

	periph = cam_extend_get(saperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct sa_softc *)periph->softc;

	/*
	 * Find the device that the user is talking about
	 */
	switch (cmd) {
	case MTIOCGET:
	{
		struct mtget *g = (struct mtget *)arg;

		CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
			 ("saioctl: MTIOGET\n"));

		bzero(g, sizeof(struct mtget));
		g->mt_type = 0x7;	/* Ultrix compat *//*?
*/ 493 g->mt_density = softc->media_density; 494 g->mt_blksiz = softc->media_blksize; 495 if (softc->flags & SA_FLAG_COMP_UNSUPP) { 496 g->mt_comp = MT_COMP_UNSUPP; 497 g->mt_comp0 = MT_COMP_UNSUPP; 498 g->mt_comp1 = MT_COMP_UNSUPP; 499 g->mt_comp2 = MT_COMP_UNSUPP; 500 g->mt_comp3 = MT_COMP_UNSUPP; 501 } else if ((softc->flags & SA_FLAG_COMP_ENABLED) == 0) { 502 g->mt_comp = MT_COMP_DISABLED; 503 g->mt_comp0 = MT_COMP_DISABLED; 504 g->mt_comp1 = MT_COMP_DISABLED; 505 g->mt_comp2 = MT_COMP_DISABLED; 506 g->mt_comp3 = MT_COMP_DISABLED; 507 } else { 508 g->mt_comp = softc->comp_algorithm; 509 g->mt_comp0 = softc->comp_algorithm; 510 g->mt_comp1 = softc->comp_algorithm; 511 g->mt_comp2 = softc->comp_algorithm; 512 g->mt_comp3 = softc->comp_algorithm; 513 } 514 g->mt_density0 = softc->media_density; 515 g->mt_density1 = softc->media_density; 516 g->mt_density2 = softc->media_density; 517 g->mt_density3 = softc->media_density; 518 g->mt_blksiz0 = softc->media_blksize; 519 g->mt_blksiz1 = softc->media_blksize; 520 g->mt_blksiz2 = softc->media_blksize; 521 g->mt_blksiz3 = softc->media_blksize; 522 error = 0; 523 break; 524 } 525 case MTIOCTOP: 526 { 527 struct mtop *mt; 528 int count; 529 530 mt = (struct mtop *)arg; 531 532 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 533 ("saioctl: op=0x%x count=0x%x\n", 534 mt->mt_op, mt->mt_count)); 535 536 count = mt->mt_count; 537 switch (mt->mt_op) { 538 case MTWEOF: /* write an end-of-file record */ 539 error = sawritefilemarks(periph, count, 540 /*setmarks*/FALSE); 541 break; 542 case MTBSR: /* backward space record */ 543 case MTFSR: /* forward space record */ 544 case MTBSF: /* backward space file */ 545 case MTFSF: /* forward space file */ 546 case MTEOD: /* space to end of recorded medium */ 547 { 548 int nmarks; 549 scsi_space_code spaceop; 550 551 nmarks = softc->filemarks; 552 error = sacheckeod(periph); 553 nmarks -= softc->filemarks; 554 555 if ((mt->mt_op == MTBSR) || (mt->mt_op == MTBSF)) 556 count = -count; 557 558 if 
((mt->mt_op == MTBSF) || (mt->mt_op == MTFSF)) 559 spaceop = SS_FILEMARKS; 560 else if ((mt->mt_op == MTBSR) || (mt->mt_op == MTFSR)) 561 spaceop = SS_BLOCKS; 562 else { 563 spaceop = SS_EOD; 564 count = 0; 565 nmarks = 0; 566 } 567 568 nmarks = softc->filemarks; 569 error = sacheckeod(periph); 570 nmarks -= softc->filemarks; 571 if (error == 0) 572 error = saspace(periph, count - nmarks, 573 spaceop); 574 break; 575 } 576 case MTREW: /* rewind */ 577 error = sarewind(periph); 578 break; 579 case MTERASE: /* erase */ 580 error = saerase(periph, count); 581 break; 582 case MTRETENS: /* re-tension tape */ 583 error = saretension(periph); 584 break; 585 case MTOFFL: /* rewind and put the drive offline */ 586 /* 587 * Be sure to allow media removal before 588 * attempting the eject. 589 */ 590 saprevent(periph, PR_ALLOW); 591 error = sarewind(periph); 592 593 if (error == 0) 594 error = saloadunload(periph, /*load*/FALSE); 595 else 596 break; 597 598 /* XXX KDM */ 599 softc->flags &= ~SA_FLAG_TAPE_LOCKED; 600 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 601 break; 602 case MTNOP: /* no operation, sets status only */ 603 case MTCACHE: /* enable controller cache */ 604 case MTNOCACHE: /* disable controller cache */ 605 error = 0; 606 break; 607 case MTSETBSIZ: /* Set block size for device */ 608 609 error = sasetparams(periph, SA_PARAM_BLOCKSIZE, count, 610 0, 0); 611 break; 612 case MTSETDNSTY: /* Set density for device and mode */ 613 if (count > UCHAR_MAX) { 614 error = EINVAL; 615 break; 616 } else { 617 error = sasetparams(periph, SA_PARAM_DENSITY, 618 0, count, 0); 619 } 620 break; 621 case MTCOMP: /* enable compression */ 622 /* 623 * Some devices don't support compression, and 624 * don't like it if you ask them for the 625 * compression page. 
626 */ 627 if ((softc->quirks & SA_QUIRK_NOCOMP) 628 || (softc->flags & SA_FLAG_COMP_UNSUPP)) { 629 error = ENODEV; 630 break; 631 } 632 error = sasetparams(periph, SA_PARAM_COMPRESSION, 633 0, 0, count); 634 break; 635 default: 636 error = EINVAL; 637 } 638 break; 639 } 640 case MTIOCIEOT: 641 case MTIOCEEOT: 642 error = 0; 643 break; 644 default: 645 error = cam_periph_ioctl(periph, cmd, arg, saerror); 646 break; 647 } 648 return (error); 649 } 650 651 static void 652 sainit(void) 653 { 654 cam_status status; 655 struct cam_path *path; 656 657 /* 658 * Create our extend array for storing the devices we attach to. 659 */ 660 saperiphs = cam_extend_new(); 661 if (saperiphs == NULL) { 662 printf("sa: Failed to alloc extend array!\n"); 663 return; 664 } 665 666 /* 667 * Install a global async callback. 668 */ 669 status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID, 670 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 671 672 if (status == CAM_REQ_CMP) { 673 /* Register the async callbacks of interrest */ 674 struct ccb_setasync csa; /* 675 * This is an immediate CCB, 676 * so using the stack is OK 677 */ 678 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 679 csa.ccb_h.func_code = XPT_SASYNC_CB; 680 csa.event_enable = AC_FOUND_DEVICE; 681 csa.callback = saasync; 682 csa.callback_arg = NULL; 683 xpt_action((union ccb *)&csa); 684 status = csa.ccb_h.status; 685 xpt_free_path(path); 686 } 687 688 if (status != CAM_REQ_CMP) { 689 printf("sa: Failed to attach master async callback " 690 "due to status 0x%x!\n", status); 691 } else { 692 /* If we were successfull, register our devsw */ 693 cdevsw_add_generic(SA_BDEV_MAJOR, SA_CDEV_MAJOR, &sa_cdevsw); 694 } 695 } 696 697 static void 698 sacleanup(struct cam_periph *periph) 699 { 700 cam_extend_release(saperiphs, periph->unit_number); 701 xpt_print_path(periph->path); 702 printf("removing device entry\n"); 703 free(periph->softc, M_DEVBUF); 704 } 705 706 static void 707 saasync(void *callback_arg, u_int32_t code, 708 struct 
cam_path *path, void *arg) 709 { 710 struct cam_periph *periph; 711 712 periph = (struct cam_periph *)callback_arg; 713 switch (code) { 714 case AC_FOUND_DEVICE: 715 { 716 struct ccb_getdev *cgd; 717 cam_status status; 718 719 cgd = (struct ccb_getdev *)arg; 720 721 if (cgd->pd_type != T_SEQUENTIAL) 722 break; 723 724 /* 725 * Allocate a peripheral instance for 726 * this device and start the probe 727 * process. 728 */ 729 status = cam_periph_alloc(saregister, sacleanup, sastart, 730 "sa", CAM_PERIPH_BIO, cgd->ccb_h.path, 731 saasync, AC_FOUND_DEVICE, cgd); 732 733 if (status != CAM_REQ_CMP 734 && status != CAM_REQ_INPROG) 735 printf("saasync: Unable to probe new device " 736 "due to status 0x%x\n", status); 737 break; 738 } 739 case AC_LOST_DEVICE: 740 { 741 int s; 742 struct sa_softc *softc; 743 struct buf *q_bp; 744 struct ccb_setasync csa; 745 746 softc = (struct sa_softc *)periph->softc; 747 748 /* 749 * Insure that no other async callbacks that 750 * might affect this peripheral can come through. 751 */ 752 s = splcam(); 753 754 /* 755 * De-register any async callbacks. 756 */ 757 xpt_setup_ccb(&csa.ccb_h, periph->path, 758 /* priority */ 5); 759 csa.ccb_h.func_code = XPT_SASYNC_CB; 760 csa.event_enable = 0; 761 csa.callback = saasync; 762 csa.callback_arg = periph; 763 xpt_action((union ccb *)&csa); 764 765 softc->flags |= SA_FLAG_INVALID; 766 767 /* 768 * Return all queued I/O with ENXIO. 769 * XXX Handle any transactions queued to the card 770 * with XPT_ABORT_CCB. 
771 */ 772 while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){ 773 bufq_remove(&softc->buf_queue, q_bp); 774 q_bp->b_resid = q_bp->b_bcount; 775 q_bp->b_error = ENXIO; 776 q_bp->b_flags |= B_ERROR; 777 biodone(q_bp); 778 } 779 devstat_remove_entry(&softc->device_stats); 780 781 xpt_print_path(periph->path); 782 printf("lost device\n"); 783 784 splx(s); 785 786 cam_periph_invalidate(periph); 787 } 788 case AC_TRANSFER_NEG: 789 case AC_SENT_BDR: 790 case AC_SCSI_AEN: 791 case AC_UNSOL_RESEL: 792 case AC_BUS_RESET: 793 default: 794 break; 795 } 796 } 797 798 static cam_status 799 saregister(struct cam_periph *periph, void *arg) 800 { 801 int s; 802 struct sa_softc *softc; 803 struct ccb_setasync csa; 804 struct ccb_getdev *cgd; 805 caddr_t match; 806 807 cgd = (struct ccb_getdev *)arg; 808 if (periph == NULL) { 809 printf("saregister: periph was NULL!!\n"); 810 return(CAM_REQ_CMP_ERR); 811 } 812 813 if (cgd == NULL) { 814 printf("saregister: no getdev CCB, can't register device\n"); 815 return(CAM_REQ_CMP_ERR); 816 } 817 818 softc = (struct sa_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); 819 820 if (softc == NULL) { 821 printf("saregister: Unable to probe new device. " 822 "Unable to allocate softc\n"); 823 return(CAM_REQ_CMP_ERR); 824 } 825 826 bzero(softc, sizeof(*softc)); 827 softc->state = SA_STATE_NORMAL; 828 bufq_init(&softc->buf_queue); 829 periph->softc = softc; 830 cam_extend_set(saperiphs, periph->unit_number, periph); 831 832 /* 833 * See if this device has any quirks. 834 */ 835 match = cam_quirkmatch((caddr_t)&cgd->inq_data, 836 (caddr_t)sa_quirk_table, 837 sizeof(sa_quirk_table)/sizeof(*sa_quirk_table), 838 sizeof(*sa_quirk_table), scsi_inquiry_match); 839 840 if (match != NULL) 841 softc->quirks = ((struct sa_quirk_entry *)match)->quirks; 842 else 843 softc->quirks = SA_QUIRK_NONE; 844 845 /* 846 * The SA driver supports a blocksize, but we don't know the 847 * blocksize until we sense the media. 
So, set a flag to 848 * indicate that the blocksize is unavailable right now. 849 * We'll clear the flag as soon as we've done a read capacity. 850 */ 851 devstat_add_entry(&softc->device_stats, "sa", 852 periph->unit_number, 0, 853 DEVSTAT_BS_UNAVAILABLE, 854 cgd->pd_type | DEVSTAT_TYPE_IF_SCSI); 855 856 /* 857 * Add an async callback so that we get 858 * notified if this device goes away. 859 */ 860 xpt_setup_ccb(&csa.ccb_h, periph->path, /* priority */ 5); 861 csa.ccb_h.func_code = XPT_SASYNC_CB; 862 csa.event_enable = AC_LOST_DEVICE; 863 csa.callback = saasync; 864 csa.callback_arg = periph; 865 xpt_action((union ccb *)&csa); 866 867 xpt_announce_periph(periph, NULL); 868 869 return(CAM_REQ_CMP); 870 } 871 872 static void 873 sastart(struct cam_periph *periph, union ccb *start_ccb) 874 { 875 struct sa_softc *softc; 876 877 softc = (struct sa_softc *)periph->softc; 878 879 880 switch (softc->state) { 881 case SA_STATE_NORMAL: 882 { 883 /* Pull a buffer from the queue and get going on it */ 884 struct buf *bp; 885 int s; 886 887 /* 888 * See if there is a buf with work for us to do.. 
889 */ 890 s = splbio(); 891 bp = bufq_first(&softc->buf_queue); 892 if (periph->immediate_priority <= periph->pinfo.priority) { 893 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 894 ("queuing for immediate ccb\n")); 895 start_ccb->ccb_h.ccb_type = SA_CCB_WAITING; 896 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 897 periph_links.sle); 898 periph->immediate_priority = CAM_PRIORITY_NONE; 899 splx(s); 900 wakeup(&periph->ccb_list); 901 } else if (bp == NULL) { 902 splx(s); 903 xpt_release_ccb(start_ccb); 904 } else if ((softc->flags & SA_FLAG_ERR_PENDING) != 0) { 905 906 bufq_remove(&softc->buf_queue, bp); 907 bp->b_resid = bp->b_bcount; 908 bp->b_flags |= B_ERROR; 909 if ((softc->flags & SA_FLAG_EOM_PENDING) != 0) { 910 if ((bp->b_flags & B_READ) == 0) 911 bp->b_error = ENOSPC; 912 } 913 if ((softc->flags & SA_FLAG_EIO_PENDING) != 0) { 914 bp->b_error = EIO; 915 } 916 softc->flags &= ~SA_FLAG_ERR_PENDING; 917 bp = bufq_first(&softc->buf_queue); 918 splx(s); 919 biodone(bp); 920 } else { 921 u_int32_t length; 922 923 bufq_remove(&softc->buf_queue, bp); 924 925 if ((softc->flags & SA_FLAG_FIXED) != 0) { 926 if (softc->blk_shift != 0) { 927 length = 928 bp->b_bcount >> softc->blk_shift; 929 } else { 930 length = 931 bp->b_bcount / softc->min_blk; 932 } 933 } else { 934 length = bp->b_bcount; 935 } 936 937 devstat_start_transaction(&softc->device_stats); 938 939 /* 940 * XXX - Perhaps we should... 941 * suppress illegal length indication if we are 942 * running in variable block mode so that we don't 943 * have to request sense every time our requested 944 * block size is larger than the written block. 945 * The residual information from the ccb allows 946 * us to identify this situation anyway. The only 947 * problem with this is that we will not get 948 * information about blocks that are larger than 949 * our read buffer unless we set the block size 950 * in the mode page to something other than 0. 
951 */ 952 scsi_sa_read_write(&start_ccb->csio, 953 /*retries*/4, 954 sadone, 955 MSG_SIMPLE_Q_TAG, 956 bp->b_flags & B_READ, 957 /*SILI*/FALSE, 958 softc->flags & SA_FLAG_FIXED, 959 length, 960 bp->b_data, 961 bp->b_bcount, 962 SSD_FULL_SIZE, 963 120 * 60 * 1000); /* 2min */ 964 start_ccb->ccb_h.ccb_type = SA_CCB_BUFFER_IO; 965 start_ccb->ccb_h.ccb_bp = bp; 966 bp = bufq_first(&softc->buf_queue); 967 splx(s); 968 969 xpt_action(start_ccb); 970 } 971 972 if (bp != NULL) { 973 /* Have more work to do, so ensure we stay scheduled */ 974 xpt_schedule(periph, /* XXX priority */1); 975 } 976 break; 977 } 978 } 979 } 980 981 982 static void 983 sadone(struct cam_periph *periph, union ccb *done_ccb) 984 { 985 struct sa_softc *softc; 986 struct ccb_scsiio *csio; 987 988 softc = (struct sa_softc *)periph->softc; 989 csio = &done_ccb->csio; 990 switch (csio->ccb_h.ccb_type) { 991 case SA_CCB_BUFFER_IO: 992 { 993 struct buf *bp; 994 int error; 995 996 bp = (struct buf *)done_ccb->ccb_h.ccb_bp; 997 error = 0; 998 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 999 1000 if ((error = saerror(done_ccb, 0, 0)) == ERESTART) { 1001 /* 1002 * A retry was scheuled, so 1003 * just return. 1004 */ 1005 return; 1006 } 1007 } 1008 1009 if (error == EIO) { 1010 int s; 1011 struct buf *q_bp; 1012 1013 /* 1014 * Catastrophic error. Mark our pack as invalid, 1015 * return all queued I/O with EIO, and unfreeze 1016 * our queue so that future transactions that 1017 * attempt to fix this problem can get to the 1018 * device. 
1019 * 1020 */ 1021 1022 s = splbio(); 1023 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 1024 1025 while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) { 1026 bufq_remove(&softc->buf_queue, q_bp); 1027 q_bp->b_resid = q_bp->b_bcount; 1028 q_bp->b_error = EIO; 1029 q_bp->b_flags |= B_ERROR; 1030 biodone(q_bp); 1031 } 1032 splx(s); 1033 } 1034 if (error != 0) { 1035 bp->b_resid = bp->b_bcount; 1036 bp->b_error = error; 1037 bp->b_flags |= B_ERROR; 1038 cam_release_devq(done_ccb->ccb_h.path, 1039 /*relsim_flags*/0, 1040 /*reduction*/0, 1041 /*timeout*/0, 1042 /*getcount_only*/0); 1043 } else { 1044 bp->b_resid = csio->resid; 1045 bp->b_error = 0; 1046 if (csio->resid != 0) { 1047 bp->b_flags |= B_ERROR; 1048 } 1049 if ((bp->b_flags & B_READ) == 0) { 1050 softc->flags |= SA_FLAG_TAPE_WRITTEN; 1051 softc->filemarks = 0; 1052 } 1053 } 1054 1055 devstat_end_transaction(&softc->device_stats, 1056 bp->b_bcount - bp->b_resid, 1057 done_ccb->csio.tag_action & 0xf, 1058 (bp->b_flags & B_READ) ? DEVSTAT_READ 1059 : DEVSTAT_WRITE); 1060 biodone(bp); 1061 break; 1062 } 1063 case SA_CCB_WAITING: 1064 { 1065 /* Caller will release the CCB */ 1066 wakeup(&done_ccb->ccb_h.cbfcnp); 1067 return; 1068 } 1069 } 1070 xpt_release_ccb(done_ccb); 1071 } 1072 1073 static int 1074 samount(struct cam_periph *periph) 1075 { 1076 struct sa_softc *softc; 1077 union ccb *ccb; 1078 struct ccb_scsiio *csio; 1079 int error; 1080 1081 softc = (struct sa_softc *)periph->softc; 1082 ccb = cam_periph_getccb(periph, /* priority */1); 1083 csio = &ccb->csio; 1084 error = 0; 1085 1086 /* 1087 * Determine if something has happend since the last 1088 * open/mount that would invalidate a mount. This 1089 * will also eat any pending UAs. 
1090 */ 1091 scsi_test_unit_ready(csio, 1092 /*retries*/1, 1093 sadone, 1094 MSG_SIMPLE_Q_TAG, 1095 SSD_FULL_SIZE, 1096 /*timeout*/5000); 1097 1098 cam_periph_runccb(ccb, /*error handler*/NULL, /*cam_flags*/0, 1099 /*sense_flags*/0, &softc->device_stats); 1100 1101 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) { 1102 cam_release_devq(ccb->ccb_h.path, 1103 /*relsim_flags*/0, 1104 /*reduction*/0, 1105 /*timeout*/0, 1106 /*getcount_only*/0); 1107 softc->flags &= ~SA_FLAG_TAPE_MOUNTED; 1108 } 1109 1110 if ((softc->flags & SA_FLAG_TAPE_MOUNTED) == 0) { 1111 struct scsi_read_block_limits_data *rblim; 1112 int buff_mode, comp_enabled, comp_supported; 1113 u_int8_t write_protect; 1114 1115 /* 1116 * Clear out old state. 1117 */ 1118 softc->flags &= ~(SA_FLAG_TAPE_WP|SA_FLAG_TAPE_WRITTEN| 1119 SA_FLAG_ERR_PENDING|SA_FLAG_COMP_ENABLED| 1120 SA_FLAG_COMP_UNSUPP); 1121 softc->filemarks = 0; 1122 1123 /* 1124 * First off, determine block limits. 1125 */ 1126 rblim = (struct scsi_read_block_limits_data *) 1127 malloc(sizeof(*rblim), M_TEMP, M_WAITOK); 1128 1129 scsi_read_block_limits(csio, 1130 /*retries*/1, 1131 sadone, 1132 MSG_SIMPLE_Q_TAG, 1133 rblim, 1134 SSD_FULL_SIZE, 1135 /*timeout*/5000); 1136 1137 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1138 /*sense_flags*/SF_RETRY_UA, 1139 &softc->device_stats); 1140 1141 xpt_release_ccb(ccb); 1142 1143 if (error != 0) 1144 goto exit; 1145 1146 softc->blk_gran = RBL_GRAN(rblim); 1147 softc->max_blk = scsi_3btoul(rblim->maximum); 1148 softc->min_blk = scsi_2btoul(rblim->minimum); 1149 if (softc->max_blk == softc->min_blk) { 1150 softc->flags |= SA_FLAG_FIXED; 1151 if (powerof2(softc->min_blk)) { 1152 softc->blk_mask = softc->min_blk - 1; 1153 softc->blk_shift = 0; 1154 softc->blk_shift = ffs(softc->min_blk) - 1; 1155 } else { 1156 softc->blk_mask = ~0; 1157 softc->blk_shift = 0; 1158 } 1159 } else { 1160 /* 1161 * SCSI-III spec allows 0 1162 * to mean "unspecified" 1163 */ 1164 if (softc->max_blk == 0) { 1165 
				softc->max_blk = ~0;
			}
			softc->blk_shift = 0;
			if (softc->blk_gran != 0) {
				softc->blk_mask = softc->blk_gran - 1;
			} else {
				softc->blk_mask = 0;
			}
		}

		/*
		 * Next, perform a mode sense to determine
		 * current density, blocksize, compression etc.
		 */
		error = sagetparams(periph, SA_PARAM_ALL,
				    &softc->media_blksize,
				    &softc->media_density,
				    &softc->media_numblks,
				    &softc->buffer_mode, &write_protect,
				    &softc->speed, &comp_supported,
				    &comp_enabled, &softc->comp_algorithm,
				    NULL);

		if (error != 0)
			goto exit;

		if (write_protect)
			softc->flags |= SA_FLAG_TAPE_WP;

		if (comp_supported) {
			if (comp_enabled) {
				softc->flags |= SA_FLAG_COMP_ENABLED;

				/* Remember the algorithm so it can be
				 * restored if compression is toggled. */
				if (softc->saved_comp_algorithm == 0)
					softc->saved_comp_algorithm =
						softc->comp_algorithm;
			}
		} else
			softc->flags |= SA_FLAG_COMP_UNSUPP;

		/* Switch the drive to buffered mode if it isn't already. */
		if (softc->buffer_mode != SMH_SA_BUF_MODE_NOBUF)
			goto exit;

		error = sasetparams(periph, SA_PARAM_BUFF_MODE, 0, 0, 0);

		if (error == 0)
			softc->buffer_mode = SMH_SA_BUF_MODE_SIBUF;
exit:
		if (rblim != NULL)
			free(rblim, M_TEMP);

		if (error != 0) {
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		}
	} else
		xpt_release_ccb(ccb);

	return (error);
}

/*
 * Write any filemarks needed to properly terminate recorded data:
 * one after a write, or two when SA_FLAG_2FM_AT_EOD is set, less
 * whatever filemarks have already been written.
 */
static int
sacheckeod(struct cam_periph *periph)
{
	int	error;
	int	markswanted;
	struct	sa_softc *softc;

	softc = (struct sa_softc *)periph->softc;
	markswanted = 0;

	if ((softc->flags & SA_FLAG_TAPE_WRITTEN) != 0) {
		markswanted++;

		if ((softc->flags & SA_FLAG_2FM_AT_EOD) != 0)
			markswanted++;
	}

	if (softc->filemarks < markswanted) {
		markswanted -= softc->filemarks;
		error = sawritefilemarks(periph, markswanted,
/*setmarks*/FALSE); 1250 } else { 1251 error = 0; 1252 } 1253 return (error); 1254 } 1255 1256 static int 1257 saerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1258 { 1259 struct cam_periph *periph; 1260 struct sa_softc *softc; 1261 struct ccb_scsiio *csio; 1262 struct scsi_sense_data *sense; 1263 int error_code, sense_key, asc, ascq; 1264 int error; 1265 1266 periph = xpt_path_periph(ccb->ccb_h.path); 1267 softc = (struct sa_softc *)periph->softc; 1268 csio = &ccb->csio; 1269 sense = &csio->sense_data; 1270 scsi_extract_sense(sense, &error_code, &sense_key, &asc, &ascq); 1271 error = 0; 1272 1273 if (((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR) 1274 && ((sense->flags & (SSD_EOM|SSD_FILEMARK|SSD_ILI)) != 0) 1275 && ((sense_key == SSD_KEY_NO_SENSE) 1276 || (sense_key == SSD_KEY_BLANK_CHECK))) { 1277 u_int32_t info; 1278 u_int32_t resid; 1279 int defer_action; 1280 1281 /* 1282 * Filter out some sense codes of interest. 1283 */ 1284 if ((sense->error_code & SSD_ERRCODE_VALID) != 0) { 1285 info = scsi_4btoul(sense->info); 1286 resid = info; 1287 if ((softc->flags & SA_FLAG_FIXED) != 0) 1288 resid *= softc->media_blksize; 1289 } else { 1290 resid = csio->dxfer_len; 1291 info = resid; 1292 if ((softc->flags & SA_FLAG_FIXED) != 0) 1293 info /= softc->media_blksize; 1294 } 1295 if ((resid > 0 && resid < csio->dxfer_len) 1296 && (softc->flags & SA_FLAG_FIXED) != 0) 1297 defer_action = TRUE; 1298 else 1299 defer_action = FALSE; 1300 1301 if ((sense->flags & SSD_EOM) != 0 1302 || (sense_key == 0x8 /* BLANK CHECK*/)) { 1303 csio->resid = resid; 1304 if (defer_action) { 1305 softc->flags |= SA_FLAG_EOM_PENDING; 1306 } else { 1307 if (csio->cdb_io.cdb_bytes[0] == SA_WRITE) 1308 error = ENOSPC; 1309 } 1310 } 1311 if ((sense->flags & SSD_FILEMARK) != 0) { 1312 csio->resid = resid; 1313 if (defer_action) 1314 softc->flags |= SA_FLAG_EOF_PENDING; 1315 } 1316 if (sense->flags & SSD_ILI) { 1317 if (info < 0) { 1318 /* 1319 * The record was too 
big. 1320 */ 1321 xpt_print_path(csio->ccb_h.path); 1322 printf("%d-byte tape record bigger " 1323 "than suplied read buffer\n", 1324 csio->dxfer_len - info); 1325 csio->resid = csio->dxfer_len; 1326 error = EIO; 1327 } else { 1328 csio->resid = resid; 1329 if ((softc->flags & SA_FLAG_FIXED) != 0) { 1330 if (defer_action) 1331 softc->flags |= 1332 SA_FLAG_EIO_PENDING; 1333 else 1334 error = EIO; 1335 } 1336 } 1337 } 1338 } 1339 if (error == 0) 1340 error = cam_periph_error(ccb, cam_flags, sense_flags, 1341 &softc->saved_ccb); 1342 1343 return (error); 1344 } 1345 1346 static int 1347 sagetparams(struct cam_periph *periph, sa_params params_to_get, 1348 u_int32_t *blocksize, u_int8_t *density, u_int32_t *numblocks, 1349 int *buff_mode, u_int8_t *write_protect, u_int8_t *speed, 1350 int *comp_supported, int *comp_enabled, u_int32_t *comp_algorithm, 1351 struct scsi_data_compression_page *comp_page) 1352 { 1353 union ccb *ccb; 1354 void *mode_buffer; 1355 struct scsi_mode_header_6 *mode_hdr; 1356 struct scsi_mode_blk_desc *mode_blk; 1357 struct scsi_data_compression_page *ncomp_page; 1358 int mode_buffer_len; 1359 struct sa_softc *softc; 1360 int error; 1361 cam_status status; 1362 1363 softc = (struct sa_softc *)periph->softc; 1364 1365 ccb = cam_periph_getccb(periph, /*priority*/ 1); 1366 1367 retry: 1368 mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); 1369 1370 if (params_to_get & SA_PARAM_COMPRESSION) { 1371 if (softc->quirks & SA_QUIRK_NOCOMP) { 1372 *comp_supported = FALSE; 1373 params_to_get &= ~SA_PARAM_COMPRESSION; 1374 } else 1375 mode_buffer_len += 1376 sizeof(struct scsi_data_compression_page); 1377 } 1378 1379 mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK); 1380 1381 bzero(mode_buffer, mode_buffer_len); 1382 1383 mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; 1384 mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; 1385 1386 if (params_to_get & SA_PARAM_COMPRESSION) 1387 ncomp_page = (struct scsi_data_compression_page 
*)&mode_blk[1]; 1388 else 1389 ncomp_page = NULL; 1390 1391 scsi_mode_sense(&ccb->csio, 1392 /*retries*/ 1, 1393 /*cbfcnp*/ sadone, 1394 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1395 /*dbd*/ FALSE, 1396 /*page_code*/ SMS_PAGE_CTRL_CURRENT, 1397 /*page*/ (params_to_get & SA_PARAM_COMPRESSION) ? 1398 SA_DATA_COMPRESSION_PAGE : 1399 SMS_VENDOR_SPECIFIC_PAGE, 1400 /*param_buf*/ mode_buffer, 1401 /*param_len*/ mode_buffer_len, 1402 /*sense_len*/ SSD_FULL_SIZE, 1403 /*timeout*/ 5000); 1404 1405 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1406 /*sense_flags*/SF_NO_PRINT, 1407 &softc->device_stats); 1408 1409 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1410 cam_release_devq(ccb->ccb_h.path, 1411 /* relsim_flags */0, 1412 /* opening_reduction */0, 1413 /* timeout */0, 1414 /* getcount_only */ FALSE); 1415 1416 status = ccb->ccb_h.status & CAM_STATUS_MASK; 1417 1418 if (error == EINVAL 1419 && (params_to_get & SA_PARAM_COMPRESSION) != 0) { 1420 /* 1421 * Most likely doesn't support the compression 1422 * page. Remeber this for the future and attempt 1423 * the request without asking for compression info. 1424 */ 1425 softc->quirks |= SA_QUIRK_NOCOMP; 1426 free(mode_buffer, M_TEMP); 1427 goto retry; 1428 } else if (error == 0) { 1429 struct scsi_data_compression_page *temp_comp_page; 1430 1431 temp_comp_page = NULL; 1432 1433 /* 1434 * If the user only wants the compression information, and 1435 * the device doesn't send back the block descriptor, it's 1436 * no big deal. If the user wants more than just 1437 * compression, though, and the device doesn't pass back the 1438 * block descriptor, we need to send another mode sense to 1439 * get the block descriptor. 1440 */ 1441 if ((mode_hdr->blk_desc_len == 0) 1442 && (params_to_get & SA_PARAM_COMPRESSION) 1443 && ((params_to_get & ~(SA_PARAM_COMPRESSION)) != 0)) { 1444 1445 /* 1446 * Decrease the mode buffer length by the size of 1447 * the compression page, to make sure the data 1448 * there doesn't get overwritten. 
1449 */ 1450 mode_buffer_len -= sizeof(*ncomp_page); 1451 1452 /* 1453 * Now move the compression page that we presumably 1454 * got back down the memory chunk a little bit so 1455 * it doesn't get spammed. 1456 */ 1457 temp_comp_page = 1458 (struct scsi_data_compression_page *)&mode_hdr[1]; 1459 bcopy(temp_comp_page, ncomp_page, sizeof(*ncomp_page)); 1460 1461 /* 1462 * Now, we issue another mode sense and just ask 1463 * for the block descriptor, etc. 1464 */ 1465 scsi_mode_sense(&ccb->csio, 1466 /*retries*/ 1, 1467 /*cbfcnp*/ sadone, 1468 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1469 /*dbd*/ FALSE, 1470 /*page_code*/ SMS_PAGE_CTRL_CURRENT, 1471 /*page*/ SMS_VENDOR_SPECIFIC_PAGE, 1472 /*param_buf*/ mode_buffer, 1473 /*param_len*/ mode_buffer_len, 1474 /*sense_len*/ SSD_FULL_SIZE, 1475 /*timeout*/ 5000); 1476 1477 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1478 /*sense_flags*/ 0, 1479 &softc->device_stats); 1480 1481 if (error != 0) 1482 goto sagetparamsexit; 1483 1484 } 1485 1486 if (params_to_get & SA_PARAM_BLOCKSIZE) 1487 *blocksize = scsi_3btoul(mode_blk->blklen); 1488 1489 if (params_to_get & SA_PARAM_NUMBLOCKS) 1490 *numblocks = scsi_3btoul(mode_blk->nblocks); 1491 1492 if (params_to_get & SA_PARAM_BUFF_MODE) 1493 *buff_mode = mode_hdr->dev_spec & SMH_SA_BUF_MODE_MASK; 1494 1495 if (params_to_get & SA_PARAM_DENSITY) 1496 *density = mode_blk->density; 1497 1498 if (params_to_get & SA_PARAM_WP) 1499 *write_protect = (mode_hdr->dev_spec & SMH_SA_WP) ? 1500 TRUE : FALSE; 1501 if (params_to_get & SA_PARAM_SPEED) 1502 *speed = mode_hdr->dev_spec & SMH_SA_SPEED_MASK; 1503 1504 if (params_to_get & SA_PARAM_COMPRESSION) { 1505 *comp_supported =(ncomp_page->dce_and_dcc & SA_DCP_DCC)? 1506 TRUE : FALSE; 1507 *comp_enabled = (ncomp_page->dce_and_dcc & SA_DCP_DCE)? 
1508 TRUE : FALSE; 1509 *comp_algorithm = 1510 scsi_4btoul(ncomp_page->comp_algorithm); 1511 if (comp_page != NULL) 1512 bcopy(ncomp_page, comp_page,sizeof(*comp_page)); 1513 } 1514 1515 } else if (status == CAM_SCSI_STATUS_ERROR) { 1516 /* Tell the user about the fatal error. */ 1517 scsi_sense_print(&ccb->csio); 1518 } 1519 1520 sagetparamsexit: 1521 1522 xpt_release_ccb(ccb); 1523 free(mode_buffer, M_TEMP); 1524 return(error); 1525 } 1526 1527 /* 1528 * The purpose of this function is to set one of four different parameters 1529 * for a tape drive: 1530 * - blocksize 1531 * - density 1532 * - compression / compression algorithm 1533 * - buffering mode 1534 * 1535 * The assumption is that this will be called from saioctl(), and therefore 1536 * from a process context. Thus the waiting malloc calls below. If that 1537 * assumption ever changes, the malloc calls should be changed to be 1538 * NOWAIT mallocs. 1539 * 1540 * Any or all of the four parameters may be set when this function is 1541 * called. It should handle setting more than one parameter at once. 
1542 */ 1543 static int 1544 sasetparams(struct cam_periph *periph, sa_params params_to_set, 1545 u_int32_t blocksize, u_int8_t density, u_int32_t comp_algorithm) 1546 { 1547 struct sa_softc *softc; 1548 u_int32_t current_blocksize; 1549 u_int32_t current_comp_algorithm; 1550 u_int8_t current_density; 1551 u_int8_t current_speed; 1552 int comp_enabled, comp_supported; 1553 void *mode_buffer; 1554 int mode_buffer_len; 1555 struct scsi_mode_header_6 *mode_hdr; 1556 struct scsi_mode_blk_desc *mode_blk; 1557 struct scsi_data_compression_page *comp_page; 1558 struct scsi_data_compression_page *current_comp_page; 1559 int buff_mode; 1560 union ccb *ccb; 1561 int error; 1562 1563 softc = (struct sa_softc *)periph->softc; 1564 1565 /* silence the compiler */ 1566 ccb = NULL; 1567 1568 current_comp_page = malloc(sizeof(*current_comp_page),M_TEMP, M_WAITOK); 1569 1570 /* 1571 * Since it doesn't make sense to set the number of blocks, or 1572 * write protection, we won't try to get the current value. We 1573 * always want to get the blocksize, so we can set it back to the 1574 * proper value. 
1575 */ 1576 error = sagetparams(periph, params_to_set | SA_PARAM_BLOCKSIZE | 1577 SA_PARAM_SPEED, ¤t_blocksize, 1578 ¤t_density, NULL, &buff_mode, NULL, 1579 ¤t_speed, &comp_supported, &comp_enabled, 1580 ¤t_comp_algorithm, current_comp_page); 1581 1582 if (error != 0) { 1583 free(current_comp_page, M_TEMP); 1584 return(error); 1585 } 1586 1587 mode_buffer_len = sizeof(*mode_hdr) + sizeof(*mode_blk); 1588 if (params_to_set & SA_PARAM_COMPRESSION) 1589 mode_buffer_len += sizeof(struct scsi_data_compression_page); 1590 1591 mode_buffer = malloc(mode_buffer_len, M_TEMP, M_WAITOK); 1592 1593 bzero(mode_buffer, mode_buffer_len); 1594 1595 mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; 1596 mode_blk = (struct scsi_mode_blk_desc *)&mode_hdr[1]; 1597 1598 if (params_to_set & SA_PARAM_COMPRESSION) { 1599 comp_page = (struct scsi_data_compression_page *)&mode_blk[1]; 1600 bcopy(current_comp_page, comp_page, sizeof(*comp_page)); 1601 } else 1602 comp_page = NULL; 1603 1604 /* 1605 * If the caller wants us to set the blocksize, use the one they 1606 * pass in. Otherwise, use the blocksize we got back from the 1607 * mode select above. 1608 */ 1609 if (params_to_set & SA_PARAM_BLOCKSIZE) 1610 scsi_ulto3b(blocksize, mode_blk->blklen); 1611 else 1612 scsi_ulto3b(current_blocksize, mode_blk->blklen); 1613 1614 /* 1615 * 0x7f means "same as before" 1616 */ 1617 if (params_to_set & SA_PARAM_DENSITY) 1618 mode_blk->density = density; 1619 else 1620 mode_blk->density = 0x7f; 1621 1622 /* 1623 * For mode selects, these two fields must be zero. 
1624 */ 1625 mode_hdr->data_length = 0; 1626 mode_hdr->medium_type = 0; 1627 1628 /* set the speed to the current value */ 1629 mode_hdr->dev_spec = current_speed; 1630 1631 /* set single-initiator buffering mode */ 1632 mode_hdr->dev_spec |= SMH_SA_BUF_MODE_SIBUF; 1633 1634 mode_hdr->blk_desc_len = sizeof(struct scsi_mode_blk_desc); 1635 1636 /* 1637 * First, if the user wants us to set the compression algorithm or 1638 * just turn compression on, check to make sure that this drive 1639 * supports compression. 1640 */ 1641 if ((params_to_set & SA_PARAM_COMPRESSION) 1642 && (current_comp_page->dce_and_dcc & SA_DCP_DCC)) { 1643 1644 /* 1645 * If the compression algorithm is 0, disable compression. 1646 * If the compression algorithm is non-zero, enable 1647 * compression and set the compression type to the 1648 * specified compression algorithm, unless the algorithm is 1649 * MT_COMP_ENABLE. In that case, we look at the 1650 * compression algorithm that is currently set and if it is 1651 * non-zero, we leave it as-is. If it is zero, and we have 1652 * saved a compression algorithm from a time when 1653 * compression was enabled before, set the compression to 1654 * the saved value. 1655 */ 1656 if (comp_algorithm == 0) { 1657 /* disable compression */ 1658 comp_page->dce_and_dcc &= ~SA_DCP_DCE; 1659 } else { 1660 /* enable compression */ 1661 comp_page->dce_and_dcc |= SA_DCP_DCE; 1662 1663 /* enable decompression */ 1664 comp_page->dde_and_red |= SA_DCP_DDE; 1665 1666 if (comp_algorithm != MT_COMP_ENABLE) { 1667 /* set the compression algorithm */ 1668 scsi_ulto4b(comp_algorithm, 1669 comp_page->comp_algorithm); 1670 1671 } else if ((scsi_4btoul(comp_page->comp_algorithm) == 0) 1672 && (softc->saved_comp_algorithm != 0)) { 1673 scsi_ulto4b(softc->saved_comp_algorithm, 1674 comp_page->comp_algorithm); 1675 } 1676 } 1677 } else if (params_to_set & SA_PARAM_COMPRESSION) { 1678 /* 1679 * The drive doesn't support compression, so turn off the 1680 * set compression bit. 
1681 */ 1682 params_to_set &= ~SA_PARAM_COMPRESSION; 1683 1684 /* 1685 * Should probably do something other than a printf...like 1686 * set a flag in the softc saying that this drive doesn't 1687 * support compression. 1688 */ 1689 xpt_print_path(periph->path); 1690 printf("sasetparams: device does not support compression\n"); 1691 1692 /* 1693 * If that was the only thing the user wanted us to set, 1694 * clean up allocated resources and return with 'operation 1695 * not supported'. 1696 */ 1697 if (params_to_set == SA_PARAM_NONE) { 1698 free(mode_buffer, M_TEMP); 1699 return(ENODEV); 1700 } 1701 1702 /* 1703 * That wasn't the only thing the user wanted us to set. 1704 * So, decrease the stated mode buffer length by the size 1705 * of the compression mode page. 1706 */ 1707 mode_buffer_len -= sizeof(*comp_page); 1708 } 1709 1710 ccb = cam_periph_getccb(periph, /*priority*/ 1); 1711 1712 scsi_mode_select(&ccb->csio, 1713 /*retries*/1, 1714 /*cbfcnp*/ sadone, 1715 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1716 /*scsi_page_fmt*/(params_to_set & SA_PARAM_COMPRESSION)? 1717 TRUE : FALSE, 1718 /*save_pages*/ FALSE, 1719 /*param_buf*/ mode_buffer, 1720 /*param_len*/ mode_buffer_len, 1721 /*sense_len*/ SSD_FULL_SIZE, 1722 /*timeout*/ 5000); 1723 1724 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1725 /*sense_flags*/ 0, &softc->device_stats); 1726 1727 if (error == 0) { 1728 xpt_release_ccb(ccb); 1729 } else { 1730 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1731 cam_release_devq(ccb->ccb_h.path, 1732 /*relsim_flags*/0, 1733 /*reduction*/0, 1734 /*timeout*/0, 1735 /*getcount_only*/0); 1736 /* 1737 * If we were setting the blocksize, and that failed, we 1738 * want to set it to its original value. If we weren't 1739 * setting the blocksize, we don't want to change it. 1740 */ 1741 scsi_ulto3b(current_blocksize, mode_blk->blklen); 1742 1743 /* 1744 * 0x7f means "same as before". 
1745 */ 1746 if (params_to_set & SA_PARAM_DENSITY) 1747 mode_blk->density = current_density; 1748 else 1749 mode_blk->density = 0x7f; 1750 1751 if (params_to_set & SA_PARAM_COMPRESSION) 1752 bcopy(current_comp_page, comp_page, 1753 sizeof(struct scsi_data_compression_page)); 1754 1755 /* 1756 * The retry count is the only CCB field that might have been 1757 * changed that we care about, so reset it back to 1. 1758 */ 1759 ccb->ccb_h.retry_count = 1; 1760 cam_periph_runccb(ccb, saerror, /*cam_flags*/ 0, 1761 /*sense_flags*/ 0, &softc->device_stats); 1762 1763 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1764 cam_release_devq(ccb->ccb_h.path, 1765 /*relsim_flags*/0, 1766 /*reduction*/0, 1767 /*timeout*/0, 1768 /*getcount_only*/0); 1769 1770 xpt_release_ccb(ccb); 1771 } 1772 1773 if (params_to_set & SA_PARAM_COMPRESSION) 1774 free(current_comp_page, M_TEMP); 1775 1776 free(mode_buffer, M_TEMP); 1777 return(error); 1778 } 1779 1780 static void 1781 saprevent(struct cam_periph *periph, int action) 1782 { 1783 struct sa_softc *softc; 1784 union ccb *ccb; 1785 int error; 1786 1787 softc = (struct sa_softc *)periph->softc; 1788 1789 if (((action == PR_ALLOW) 1790 && (softc->flags & SA_FLAG_TAPE_LOCKED) == 0) 1791 || ((action == PR_PREVENT) 1792 && (softc->flags & SA_FLAG_TAPE_LOCKED) != 0)) { 1793 return; 1794 } 1795 1796 ccb = cam_periph_getccb(periph, /*priority*/1); 1797 1798 scsi_prevent(&ccb->csio, 1799 /*retries*/0, 1800 /*cbcfp*/sadone, 1801 MSG_SIMPLE_Q_TAG, 1802 action, 1803 SSD_FULL_SIZE, 1804 60000); 1805 1806 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1807 /*sense_flags*/0, &softc->device_stats); 1808 1809 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1810 cam_release_devq(ccb->ccb_h.path, 1811 /*relsim_flags*/0, 1812 /*reduction*/0, 1813 /*timeout*/0, 1814 /*getcount_only*/0); 1815 1816 1817 if (error == 0) { 1818 if (action == PR_ALLOW) 1819 softc->flags &= ~SA_FLAG_TAPE_LOCKED; 1820 else 1821 softc->flags |= SA_FLAG_TAPE_LOCKED; 1822 } 1823 
1824 xpt_release_ccb(ccb); 1825 } 1826 1827 static int 1828 sarewind(struct cam_periph *periph) 1829 { 1830 union ccb *ccb; 1831 struct sa_softc *softc; 1832 int error; 1833 1834 softc = (struct sa_softc *)periph->softc; 1835 1836 ccb = cam_periph_getccb(periph, /*priority*/1); 1837 1838 /* 1839 * Put in a 2 hour timeout to deal with especially slow tape drives. 1840 */ 1841 scsi_rewind(&ccb->csio, 1842 /*retries*/1, 1843 /*cbcfp*/sadone, 1844 MSG_SIMPLE_Q_TAG, 1845 /*immediate*/FALSE, 1846 SSD_FULL_SIZE, 1847 (120 * 60 * 1000)); /* 2 hours */ 1848 1849 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1850 /*sense_flags*/0, &softc->device_stats); 1851 1852 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1853 cam_release_devq(ccb->ccb_h.path, 1854 /*relsim_flags*/0, 1855 /*reduction*/0, 1856 /*timeout*/0, 1857 /*getcount_only*/0); 1858 1859 xpt_release_ccb(ccb); 1860 1861 return (error); 1862 } 1863 1864 static int 1865 saspace(struct cam_periph *periph, int count, scsi_space_code code) 1866 { 1867 union ccb *ccb; 1868 struct sa_softc *softc; 1869 int error; 1870 1871 softc = (struct sa_softc *)periph->softc; 1872 1873 ccb = cam_periph_getccb(periph, /*priority*/1); 1874 1875 scsi_space(&ccb->csio, 1876 /*retries*/1, 1877 /*cbcfp*/sadone, 1878 MSG_SIMPLE_Q_TAG, 1879 code, count, 1880 SSD_FULL_SIZE, 1881 60 * 60 *1000); 1882 1883 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1884 /*sense_flags*/0, &softc->device_stats); 1885 1886 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1887 cam_release_devq(ccb->ccb_h.path, 1888 /*relsim_flags*/0, 1889 /*reduction*/0, 1890 /*timeout*/0, 1891 /*getcount_only*/0); 1892 1893 xpt_release_ccb(ccb); 1894 1895 return (error); 1896 } 1897 1898 static int 1899 sawritefilemarks(struct cam_periph *periph, int nmarks, int setmarks) 1900 { 1901 union ccb *ccb; 1902 struct sa_softc *softc; 1903 int error; 1904 1905 softc = (struct sa_softc *)periph->softc; 1906 1907 ccb = cam_periph_getccb(periph, /*priority*/1); 1908 1909 
scsi_write_filemarks(&ccb->csio, 1910 /*retries*/1, 1911 /*cbcfp*/sadone, 1912 MSG_SIMPLE_Q_TAG, 1913 /*immediate*/FALSE, 1914 setmarks, 1915 nmarks, 1916 SSD_FULL_SIZE, 1917 60000); 1918 1919 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1920 /*sense_flags*/0, &softc->device_stats); 1921 1922 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1923 cam_release_devq(ccb->ccb_h.path, 1924 /*relsim_flags*/0, 1925 /*reduction*/0, 1926 /*timeout*/0, 1927 /*getcount_only*/0); 1928 1929 if (error == 0) { 1930 struct sa_softc *softc; 1931 1932 softc = (struct sa_softc *)periph->softc; 1933 softc->filemarks += nmarks; 1934 } 1935 1936 xpt_release_ccb(ccb); 1937 1938 return (error); 1939 } 1940 1941 static int 1942 saretension(struct cam_periph *periph) 1943 { 1944 union ccb *ccb; 1945 struct sa_softc *softc; 1946 int error; 1947 1948 softc = (struct sa_softc *)periph->softc; 1949 1950 ccb = cam_periph_getccb(periph, /*priority*/1); 1951 1952 scsi_load_unload(&ccb->csio, 1953 /*retries*/ 1, 1954 /*cbfcnp*/ sadone, 1955 MSG_SIMPLE_Q_TAG, 1956 /*immediate*/ FALSE, 1957 /*eot*/ FALSE, 1958 /*reten*/ TRUE, 1959 /*load*/ TRUE, 1960 SSD_FULL_SIZE, 1961 60000); 1962 1963 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 1964 /*sense_flags*/0, &softc->device_stats); 1965 1966 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 1967 cam_release_devq(ccb->ccb_h.path, 1968 /*relsim_flags*/0, 1969 /*reduction*/0, 1970 /*timeout*/0, 1971 /*getcount_only*/0); 1972 1973 xpt_release_ccb(ccb); 1974 1975 return(error); 1976 } 1977 1978 static int 1979 sareservereleaseunit(struct cam_periph *periph, int reserve) 1980 { 1981 union ccb *ccb; 1982 struct sa_softc *softc; 1983 int error; 1984 1985 softc = (struct sa_softc *)periph->softc; 1986 1987 ccb = cam_periph_getccb(periph, /*priority*/ 1); 1988 1989 scsi_reserve_release_unit(&ccb->csio, 1990 /*retries*/ 1, 1991 /*cbfcnp*/ sadone, 1992 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1993 /*third_party*/ FALSE, 1994 /*third_party_id*/ 0, 1995 
/*sense_len*/ SSD_FULL_SIZE, 1996 /*timeout*/ 5000, 1997 reserve); 1998 1999 /* 2000 * We set SF_RETRY_UA, since this is often the first command run 2001 * when a tape device is opened, and there may be a unit attention 2002 * condition pending. 2003 */ 2004 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2005 /*sense_flags*/SF_RETRY_UA, 2006 &softc->device_stats); 2007 2008 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2009 cam_release_devq(ccb->ccb_h.path, 2010 /*relsim_flags*/0, 2011 /*reduction*/0, 2012 /*timeout*/0, 2013 /*getcount_only*/0); 2014 2015 xpt_release_ccb(ccb); 2016 2017 return (error); 2018 } 2019 2020 static int 2021 saloadunload(struct cam_periph *periph, int load) 2022 { 2023 union ccb *ccb; 2024 struct sa_softc *softc; 2025 int error; 2026 2027 softc = (struct sa_softc *)periph->softc; 2028 2029 ccb = cam_periph_getccb(periph, /*priority*/1); 2030 2031 scsi_load_unload(&ccb->csio, 2032 /*retries*/1, 2033 /*cbfcnp*/sadone, 2034 MSG_SIMPLE_Q_TAG, 2035 /*immediate*/FALSE, 2036 /*eot*/FALSE, 2037 /*reten*/FALSE, 2038 load, 2039 SSD_FULL_SIZE, 2040 60000); 2041 2042 error = cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2043 /*sense_flags*/0, &softc->device_stats); 2044 2045 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2046 cam_release_devq(ccb->ccb_h.path, 2047 /*relsim_flags*/0, 2048 /*reduction*/0, 2049 /*timeout*/0, 2050 /*getcount_only*/0); 2051 2052 xpt_release_ccb(ccb); 2053 2054 return (error); 2055 } 2056 2057 static int 2058 saerase(struct cam_periph *periph, int longerase) 2059 { 2060 2061 union ccb *ccb; 2062 struct sa_softc *softc; 2063 int error; 2064 2065 softc = (struct sa_softc *)periph->softc; 2066 2067 ccb = cam_periph_getccb(periph, /*priority*/ 1); 2068 2069 scsi_erase(&ccb->csio, 2070 /*retries*/ 1, 2071 /*cbfcnp*/ sadone, 2072 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2073 /*immediate*/ FALSE, 2074 /*long_erase*/ longerase, 2075 /*sense_len*/ SSD_FULL_SIZE, 2076 /*timeout*/ 4 * 60 * 60 * 1000); /* 4 hours */ 2077 2078 error 
= cam_periph_runccb(ccb, saerror, /*cam_flags*/0, 2079 /*sense_flags*/0, &softc->device_stats); 2080 2081 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 2082 cam_release_devq(ccb->ccb_h.path, 2083 /*relsim_flags*/0, 2084 /*reduction*/0, 2085 /*timeout*/0, 2086 /*getcount_only*/0); 2087 2088 xpt_release_ccb(ccb); 2089 2090 return (error); 2091 } 2092 2093 #endif /* KERNEL */ 2094 2095 /* 2096 * Read tape block limits command. 2097 */ 2098 void 2099 scsi_read_block_limits(struct ccb_scsiio *csio, u_int32_t retries, 2100 void (*cbfcnp)(struct cam_periph *, union ccb *), 2101 u_int8_t tag_action, 2102 struct scsi_read_block_limits_data *rlimit_buf, 2103 u_int8_t sense_len, u_int32_t timeout) 2104 { 2105 struct scsi_read_block_limits *scsi_cmd; 2106 2107 cam_fill_csio(csio, 2108 retries, 2109 cbfcnp, 2110 /*flags*/CAM_DIR_IN, 2111 tag_action, 2112 /*data_ptr*/(u_int8_t *)rlimit_buf, 2113 /*dxfer_len*/sizeof(*rlimit_buf), 2114 sense_len, 2115 sizeof(*scsi_cmd), 2116 timeout); 2117 2118 scsi_cmd = (struct scsi_read_block_limits *)&csio->cdb_io.cdb_bytes; 2119 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2120 scsi_cmd->opcode = READ_BLOCK_LIMITS; 2121 } 2122 2123 void 2124 scsi_sa_read_write(struct ccb_scsiio *csio, u_int32_t retries, 2125 void (*cbfcnp)(struct cam_periph *, union ccb *), 2126 u_int8_t tag_action, int readop, int sli, 2127 int fixed, u_int32_t length, u_int8_t *data_ptr, 2128 u_int32_t dxfer_len, u_int8_t sense_len, u_int32_t timeout) 2129 { 2130 struct scsi_sa_rw *scsi_cmd; 2131 2132 scsi_cmd = (struct scsi_sa_rw *)&csio->cdb_io.cdb_bytes; 2133 scsi_cmd->opcode = readop ? SA_READ : SA_WRITE; 2134 scsi_cmd->sli_fixed = 0; 2135 if (sli && readop) 2136 scsi_cmd->sli_fixed |= SAR_SLI; 2137 if (fixed) 2138 scsi_cmd->sli_fixed |= SARW_FIXED; 2139 scsi_ulto3b(length, scsi_cmd->length); 2140 scsi_cmd->control = 0; 2141 2142 cam_fill_csio(csio, 2143 retries, 2144 cbfcnp, 2145 /*flags*/readop ? 
CAM_DIR_IN : CAM_DIR_OUT, 2146 tag_action, 2147 data_ptr, 2148 dxfer_len, 2149 sense_len, 2150 sizeof(*scsi_cmd), 2151 timeout); 2152 } 2153 2154 void 2155 scsi_load_unload(struct ccb_scsiio *csio, u_int32_t retries, 2156 void (*cbfcnp)(struct cam_periph *, union ccb *), 2157 u_int8_t tag_action, int immediate, int eot, 2158 int reten, int load, u_int8_t sense_len, 2159 u_int32_t timeout) 2160 { 2161 struct scsi_load_unload *scsi_cmd; 2162 2163 scsi_cmd = (struct scsi_load_unload *)&csio->cdb_io.cdb_bytes; 2164 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2165 scsi_cmd->opcode = LOAD_UNLOAD; 2166 if (immediate) 2167 scsi_cmd->immediate = SLU_IMMED; 2168 if (eot) 2169 scsi_cmd->eot_reten_load |= SLU_EOT; 2170 if (reten) 2171 scsi_cmd->eot_reten_load |= SLU_RETEN; 2172 if (load) 2173 scsi_cmd->eot_reten_load |= SLU_LOAD; 2174 2175 cam_fill_csio(csio, 2176 retries, 2177 cbfcnp, 2178 /*flags*/CAM_DIR_NONE, 2179 tag_action, 2180 /*data_ptr*/NULL, 2181 /*dxfer_len*/0, 2182 sense_len, 2183 sizeof(*scsi_cmd), 2184 timeout); 2185 } 2186 2187 void 2188 scsi_rewind(struct ccb_scsiio *csio, u_int32_t retries, 2189 void (*cbfcnp)(struct cam_periph *, union ccb *), 2190 u_int8_t tag_action, int immediate, u_int8_t sense_len, 2191 u_int32_t timeout) 2192 { 2193 struct scsi_rewind *scsi_cmd; 2194 2195 scsi_cmd = (struct scsi_rewind *)&csio->cdb_io.cdb_bytes; 2196 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2197 scsi_cmd->opcode = REWIND; 2198 if (immediate) 2199 scsi_cmd->immediate = SREW_IMMED; 2200 2201 cam_fill_csio(csio, 2202 retries, 2203 cbfcnp, 2204 /*flags*/CAM_DIR_NONE, 2205 tag_action, 2206 /*data_ptr*/NULL, 2207 /*dxfer_len*/0, 2208 sense_len, 2209 sizeof(*scsi_cmd), 2210 timeout); 2211 } 2212 2213 void 2214 scsi_space(struct ccb_scsiio *csio, u_int32_t retries, 2215 void (*cbfcnp)(struct cam_periph *, union ccb *), 2216 u_int8_t tag_action, scsi_space_code code, 2217 u_int32_t count, u_int8_t sense_len, u_int32_t timeout) 2218 { 2219 struct scsi_space *scsi_cmd; 2220 2221 scsi_cmd = 
(struct scsi_space *)&csio->cdb_io.cdb_bytes; 2222 scsi_cmd->opcode = SPACE; 2223 scsi_cmd->code = code; 2224 scsi_ulto3b(count, scsi_cmd->count); 2225 scsi_cmd->control = 0; 2226 2227 cam_fill_csio(csio, 2228 retries, 2229 cbfcnp, 2230 /*flags*/CAM_DIR_NONE, 2231 tag_action, 2232 /*data_ptr*/NULL, 2233 /*dxfer_len*/0, 2234 sense_len, 2235 sizeof(*scsi_cmd), 2236 timeout); 2237 } 2238 2239 void 2240 scsi_write_filemarks(struct ccb_scsiio *csio, u_int32_t retries, 2241 void (*cbfcnp)(struct cam_periph *, union ccb *), 2242 u_int8_t tag_action, int immediate, int setmark, 2243 u_int32_t num_marks, u_int8_t sense_len, 2244 u_int32_t timeout) 2245 { 2246 struct scsi_write_filemarks *scsi_cmd; 2247 2248 scsi_cmd = (struct scsi_write_filemarks *)&csio->cdb_io.cdb_bytes; 2249 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2250 scsi_cmd->opcode = WRITE_FILEMARKS; 2251 if (immediate) 2252 scsi_cmd->byte2 |= SWFMRK_IMMED; 2253 if (setmark) 2254 scsi_cmd->byte2 |= SWFMRK_WSMK; 2255 2256 scsi_ulto3b(num_marks, scsi_cmd->num_marks); 2257 2258 cam_fill_csio(csio, 2259 retries, 2260 cbfcnp, 2261 /*flags*/CAM_DIR_NONE, 2262 tag_action, 2263 /*data_ptr*/NULL, 2264 /*dxfer_len*/0, 2265 sense_len, 2266 sizeof(*scsi_cmd), 2267 timeout); 2268 } 2269 2270 /* 2271 * The reserve and release unit commands differ only by their opcodes. 
2272 */ 2273 void 2274 scsi_reserve_release_unit(struct ccb_scsiio *csio, u_int32_t retries, 2275 void (*cbfcnp)(struct cam_periph *, union ccb *), 2276 u_int8_t tag_action, int third_party, 2277 int third_party_id, u_int8_t sense_len, 2278 u_int32_t timeout, int reserve) 2279 { 2280 struct scsi_reserve_release_unit *scsi_cmd; 2281 2282 scsi_cmd = (struct scsi_reserve_release_unit *)&csio->cdb_io.cdb_bytes; 2283 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2284 2285 if (reserve) 2286 scsi_cmd->opcode = RESERVE_UNIT; 2287 else 2288 scsi_cmd->opcode = RELEASE_UNIT; 2289 2290 if (third_party) { 2291 scsi_cmd->lun_thirdparty |= SRRU_3RD_PARTY; 2292 scsi_cmd->lun_thirdparty |= 2293 ((third_party_id << SRRU_3RD_SHAMT) & SRRU_3RD_MASK); 2294 } 2295 2296 cam_fill_csio(csio, 2297 retries, 2298 cbfcnp, 2299 /*flags*/ CAM_DIR_NONE, 2300 tag_action, 2301 /*data_ptr*/ NULL, 2302 /*dxfer_len*/ 0, 2303 sense_len, 2304 sizeof(*scsi_cmd), 2305 timeout); 2306 } 2307 2308 void 2309 scsi_erase(struct ccb_scsiio *csio, u_int32_t retries, 2310 void (*cbfcnp)(struct cam_periph *, union ccb *), 2311 u_int8_t tag_action, int immediate, int long_erase, 2312 u_int8_t sense_len, u_int32_t timeout) 2313 { 2314 struct scsi_erase *scsi_cmd; 2315 2316 scsi_cmd = (struct scsi_erase *)&csio->cdb_io.cdb_bytes; 2317 bzero(scsi_cmd, sizeof(*scsi_cmd)); 2318 2319 scsi_cmd->opcode = ERASE; 2320 2321 if (immediate) 2322 scsi_cmd->lun_imm_long |= SE_IMMED; 2323 2324 if (long_erase) 2325 scsi_cmd->lun_imm_long |= SE_LONG; 2326 2327 cam_fill_csio(csio, 2328 retries, 2329 cbfcnp, 2330 /*flags*/ CAM_DIR_NONE, 2331 tag_action, 2332 /*data_ptr*/ NULL, 2333 /*dxfer_len*/ 0, 2334 sense_len, 2335 sizeof(*scsi_cmd), 2336 timeout); 2337 } 2338