1 /*- 2 * Copyright (c) 1997-2007 Kenneth D. Merry 3 * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 13 * substantially similar to the "NO WARRANTY" disclaimer below 14 * ("Disclaimer") and any redistribution must be conditioned upon 15 * including a substantially similar Disclaimer requirement for further 16 * binary redistribution. 17 * 18 * NO WARRANTY 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 28 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGES. 30 * 31 * Authors: Ken Merry (Spectra Logic Corporation) 32 */ 33 34 /* 35 * This is eventually intended to be: 36 * - A basic data transfer/copy utility 37 * - A simple benchmark utility 38 * - An example of how to use the asynchronous pass(4) driver interface. 
39 */ 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/ioctl.h> 44 #include <sys/stdint.h> 45 #include <sys/types.h> 46 #include <sys/endian.h> 47 #include <sys/param.h> 48 #include <sys/sbuf.h> 49 #include <sys/stat.h> 50 #include <sys/event.h> 51 #include <sys/time.h> 52 #include <sys/uio.h> 53 #include <vm/vm.h> 54 #include <machine/bus.h> 55 #include <sys/bus.h> 56 #include <sys/bus_dma.h> 57 #include <sys/mtio.h> 58 #include <sys/conf.h> 59 #include <sys/disk.h> 60 61 #include <stdio.h> 62 #include <stdlib.h> 63 #include <semaphore.h> 64 #include <string.h> 65 #include <unistd.h> 66 #include <inttypes.h> 67 #include <limits.h> 68 #include <fcntl.h> 69 #include <ctype.h> 70 #include <err.h> 71 #include <libutil.h> 72 #include <pthread.h> 73 #include <assert.h> 74 #include <bsdxml.h> 75 76 #include <cam/cam.h> 77 #include <cam/cam_debug.h> 78 #include <cam/cam_ccb.h> 79 #include <cam/scsi/scsi_all.h> 80 #include <cam/scsi/scsi_da.h> 81 #include <cam/scsi/scsi_pass.h> 82 #include <cam/scsi/scsi_message.h> 83 #include <cam/scsi/smp_all.h> 84 #include <camlib.h> 85 #include <mtlib.h> 86 #include <zlib.h> 87 88 typedef enum { 89 CAMDD_CMD_NONE = 0x00000000, 90 CAMDD_CMD_HELP = 0x00000001, 91 CAMDD_CMD_WRITE = 0x00000002, 92 CAMDD_CMD_READ = 0x00000003 93 } camdd_cmdmask; 94 95 typedef enum { 96 CAMDD_ARG_NONE = 0x00000000, 97 CAMDD_ARG_VERBOSE = 0x00000001, 98 CAMDD_ARG_DEVICE = 0x00000002, 99 CAMDD_ARG_BUS = 0x00000004, 100 CAMDD_ARG_TARGET = 0x00000008, 101 CAMDD_ARG_LUN = 0x00000010, 102 CAMDD_ARG_UNIT = 0x00000020, 103 CAMDD_ARG_TIMEOUT = 0x00000040, 104 CAMDD_ARG_ERR_RECOVER = 0x00000080, 105 CAMDD_ARG_RETRIES = 0x00000100 106 } camdd_argmask; 107 108 typedef enum { 109 CAMDD_DEV_NONE = 0x00, 110 CAMDD_DEV_PASS = 0x01, 111 CAMDD_DEV_FILE = 0x02 112 } camdd_dev_type; 113 114 struct camdd_io_opts { 115 camdd_dev_type dev_type; 116 char *dev_name; 117 uint64_t blocksize; 118 uint64_t queue_depth; 119 uint64_t offset; 120 int min_cmd_size; 121 int write_dev; 122 uint64_t debug; 123 }; 124 125 typedef enum { 126 CAMDD_BUF_NONE, 127 CAMDD_BUF_DATA, 128 CAMDD_BUF_INDIRECT 129 } camdd_buf_type; 130 131 struct camdd_buf_indirect { 132 /* 133 * Pointer to the source buffer. 134 */ 135 struct camdd_buf *src_buf; 136 137 /* 138 * Offset into the source buffer, in bytes. 139 */ 140 uint64_t offset; 141 /* 142 * Pointer to the starting point in the source buffer. 143 */ 144 uint8_t *start_ptr; 145 146 /* 147 * Length of this chunk in bytes. 148 */ 149 size_t len; 150 }; 151 152 struct camdd_buf_data { 153 /* 154 * Buffer allocated when we allocate this camdd_buf. This should 155 * be the size of the blocksize for this device. 156 */ 157 uint8_t *buf; 158 159 /* 160 * The amount of backing store allocated in buf. Generally this 161 * will be the blocksize of the device. 162 */ 163 uint32_t alloc_len; 164 165 /* 166 * The amount of data that was put into the buffer (on reads) or 167 * the amount of data we have put onto the src_list so far (on 168 * writes). 169 */ 170 uint32_t fill_len; 171 172 /* 173 * The amount of data that was not transferred. 174 */ 175 uint32_t resid; 176 177 /* 178 * Starting byte offset on the reader. 179 */ 180 uint64_t src_start_offset; 181 182 /* 183 * CCB used for pass(4) device targets. 184 */ 185 union ccb ccb; 186 187 /* 188 * Number of scatter/gather segments. 189 */ 190 int sg_count; 191 192 /* 193 * Set if we had to tack on an extra buffer to round the transfer 194 * up to a sector size. 
195 */ 196 int extra_buf; 197 198 /* 199 * Scatter/gather list used generally when we're the writer for a 200 * pass(4) device. 201 */ 202 bus_dma_segment_t *segs; 203 204 /* 205 * Scatter/gather list used generally when we're the writer for a 206 * file or block device; 207 */ 208 struct iovec *iovec; 209 }; 210 211 union camdd_buf_types { 212 struct camdd_buf_indirect indirect; 213 struct camdd_buf_data data; 214 }; 215 216 typedef enum { 217 CAMDD_STATUS_NONE, 218 CAMDD_STATUS_OK, 219 CAMDD_STATUS_SHORT_IO, 220 CAMDD_STATUS_EOF, 221 CAMDD_STATUS_ERROR 222 } camdd_buf_status; 223 224 struct camdd_buf { 225 camdd_buf_type buf_type; 226 union camdd_buf_types buf_type_spec; 227 228 camdd_buf_status status; 229 230 uint64_t lba; 231 size_t len; 232 233 /* 234 * A reference count of how many indirect buffers point to this 235 * buffer. 236 */ 237 int refcount; 238 239 /* 240 * A link back to our parent device. 241 */ 242 struct camdd_dev *dev; 243 STAILQ_ENTRY(camdd_buf) links; 244 STAILQ_ENTRY(camdd_buf) work_links; 245 246 /* 247 * A count of the buffers on the src_list. 248 */ 249 int src_count; 250 251 /* 252 * List of buffers from our partner thread that are the components 253 * of this buffer for the I/O. Uses src_links. 254 */ 255 STAILQ_HEAD(,camdd_buf) src_list; 256 STAILQ_ENTRY(camdd_buf) src_links; 257 }; 258 259 #define NUM_DEV_TYPES 2 260 261 struct camdd_dev_pass { 262 int scsi_dev_type; 263 struct cam_device *dev; 264 uint64_t max_sector; 265 uint32_t block_len; 266 uint32_t cpi_maxio; 267 }; 268 269 typedef enum { 270 CAMDD_FILE_NONE, 271 CAMDD_FILE_REG, 272 CAMDD_FILE_STD, 273 CAMDD_FILE_PIPE, 274 CAMDD_FILE_DISK, 275 CAMDD_FILE_TAPE, 276 CAMDD_FILE_TTY, 277 CAMDD_FILE_MEM 278 } camdd_file_type; 279 280 typedef enum { 281 CAMDD_FF_NONE = 0x00, 282 CAMDD_FF_CAN_SEEK = 0x01 283 } camdd_file_flags; 284 285 struct camdd_dev_file { 286 int fd; 287 struct stat sb; 288 char filename[MAXPATHLEN + 1]; 289 camdd_file_type file_type; 290 camdd_file_flags file_flags; 291 uint8_t *tmp_buf; 292 }; 293 294 struct camdd_dev_block { 295 int fd; 296 uint64_t size_bytes; 297 uint32_t block_len; 298 }; 299 300 union camdd_dev_spec { 301 struct camdd_dev_pass pass; 302 struct camdd_dev_file file; 303 struct camdd_dev_block block; 304 }; 305 306 typedef enum { 307 CAMDD_DEV_FLAG_NONE = 0x00, 308 CAMDD_DEV_FLAG_EOF = 0x01, 309 CAMDD_DEV_FLAG_PEER_EOF = 0x02, 310 CAMDD_DEV_FLAG_ACTIVE = 0x04, 311 CAMDD_DEV_FLAG_EOF_SENT = 0x08, 312 CAMDD_DEV_FLAG_EOF_QUEUED = 0x10 313 } camdd_dev_flags; 314 315 struct camdd_dev { 316 camdd_dev_type dev_type; 317 union camdd_dev_spec dev_spec; 318 camdd_dev_flags flags; 319 char device_name[MAXPATHLEN+1]; 320 uint32_t blocksize; 321 uint32_t sector_size; 322 uint64_t max_sector; 323 uint64_t sector_io_limit; 324 int min_cmd_size; 325 int write_dev; 326 int retry_count; 327 int io_timeout; 328 int debug; 329 uint64_t start_offset_bytes; 330 uint64_t next_io_pos_bytes; 331 uint64_t next_peer_pos_bytes; 332 uint64_t next_completion_pos_bytes; 333 uint64_t peer_bytes_queued; 334 uint64_t bytes_transferred; 335 uint32_t target_queue_depth; 336 uint32_t cur_active_io; 337 uint8_t *extra_buf; 338 uint32_t extra_buf_len; 339 struct camdd_dev *peer_dev; 340 pthread_mutex_t mutex; 341 pthread_cond_t cond; 342 int kq; 343 344 int (*run)(struct camdd_dev *dev); 345 int (*fetch)(struct camdd_dev *dev); 346 347 /* 348 * Buffers that are available for I/O. Uses links. 349 */ 350 STAILQ_HEAD(,camdd_buf) free_queue; 351 352 /* 353 * Free indirect buffers. 
These are used for breaking a large 354 * buffer into multiple pieces. 355 */ 356 STAILQ_HEAD(,camdd_buf) free_indirect_queue; 357 358 /* 359 * Buffers that have been queued to the kernel. Uses links. 360 */ 361 STAILQ_HEAD(,camdd_buf) active_queue; 362 363 /* 364 * Will generally contain one of our buffers that is waiting for enough 365 * I/O from our partner thread to be able to execute. This will 366 * generally happen when our per-I/O-size is larger than the 367 * partner thread's per-I/O-size. Uses links. 368 */ 369 STAILQ_HEAD(,camdd_buf) pending_queue; 370 371 /* 372 * Number of buffers on the pending queue 373 */ 374 int num_pending_queue; 375 376 /* 377 * Buffers that are filled and ready to execute. This is used when 378 * our partner (reader) thread sends us blocks that are larger than 379 * our blocksize, and so we have to split them into multiple pieces. 380 */ 381 STAILQ_HEAD(,camdd_buf) run_queue; 382 383 /* 384 * Number of buffers on the run queue. 385 */ 386 int num_run_queue; 387 388 STAILQ_HEAD(,camdd_buf) reorder_queue; 389 390 int num_reorder_queue; 391 392 /* 393 * Buffers that have been queued to us by our partner thread 394 * (generally the reader thread) to be written out. Uses 395 * work_links. 396 */ 397 STAILQ_HEAD(,camdd_buf) work_queue; 398 399 /* 400 * Buffers that have been completed by our partner thread. Uses 401 * work_links. 402 */ 403 STAILQ_HEAD(,camdd_buf) peer_done_queue; 404 405 /* 406 * Number of buffers on the peer done queue. 407 */ 408 uint32_t num_peer_done_queue; 409 410 /* 411 * A list of buffers that we have queued to our peer thread. Uses 412 * links. 413 */ 414 STAILQ_HEAD(,camdd_buf) peer_work_queue; 415 416 /* 417 * Number of buffers on the peer work queue. 418 */ 419 uint32_t num_peer_work_queue; 420 }; 421 422 static sem_t camdd_sem; 423 static int need_exit = 0; 424 static int error_exit = 0; 425 static int need_status = 0; 426 427 #ifndef min 428 #define min(a, b) (a < b) ? a : b 429 #endif 430 431 /* 432 * XXX KDM private copy of timespecsub(). This is normally defined in 433 * sys/time.h, but is only enabled in the kernel. If that definition is 434 * enabled in userland, it breaks the build of libnetbsd. 
435 */ 436 #ifndef timespecsub 437 #define timespecsub(vvp, uvp) \ 438 do { \ 439 (vvp)->tv_sec -= (uvp)->tv_sec; \ 440 (vvp)->tv_nsec -= (uvp)->tv_nsec; \ 441 if ((vvp)->tv_nsec < 0) { \ 442 (vvp)->tv_sec--; \ 443 (vvp)->tv_nsec += 1000000000; \ 444 } \ 445 } while (0) 446 #endif 447 448 449 /* Generically useful offsets into the peripheral private area */ 450 #define ppriv_ptr0 periph_priv.entries[0].ptr 451 #define ppriv_ptr1 periph_priv.entries[1].ptr 452 #define ppriv_field0 periph_priv.entries[0].field 453 #define ppriv_field1 periph_priv.entries[1].field 454 455 #define ccb_buf ppriv_ptr0 456 457 #define CAMDD_FILE_DEFAULT_BLOCK 524288 458 #define CAMDD_FILE_DEFAULT_DEPTH 1 459 #define CAMDD_PASS_MAX_BLOCK 1048576 460 #define CAMDD_PASS_DEFAULT_DEPTH 6 461 #define CAMDD_PASS_RW_TIMEOUT 60 * 1000 462 463 static int parse_btl(char *tstr, int *bus, int *target, int *lun, 464 camdd_argmask *arglst); 465 void camdd_free_dev(struct camdd_dev *dev); 466 struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type, 467 struct kevent *new_ke, int num_ke, 468 int retry_count, int timeout); 469 static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev, 470 camdd_buf_type buf_type); 471 void camdd_release_buf(struct camdd_buf *buf); 472 struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type); 473 int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, 474 uint32_t sector_size, uint32_t *num_sectors_used, 475 int *double_buf_needed); 476 uint32_t camdd_buf_get_len(struct camdd_buf *buf); 477 void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); 478 int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 479 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); 480 struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, 481 int retry_count, int timeout); 482 struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, 483 struct camdd_io_opts *io_opts, 484 camdd_argmask arglist, int probe_retry_count, 485 int probe_timeout, int io_retry_count, 486 int io_timeout); 487 void *camdd_file_worker(void *arg); 488 camdd_buf_status camdd_ccb_status(union ccb *ccb); 489 int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); 490 int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); 491 void camdd_peer_done(struct camdd_buf *buf); 492 void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 493 int *error_count); 494 int camdd_pass_fetch(struct camdd_dev *dev); 495 int camdd_file_run(struct camdd_dev *dev); 496 int camdd_pass_run(struct camdd_dev *dev); 497 int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len); 498 int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf); 499 void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 500 uint32_t *peer_depth, uint32_t *our_bytes, 501 uint32_t *peer_bytes); 502 void *camdd_worker(void *arg); 503 void camdd_sig_handler(int sig); 504 void camdd_print_status(struct camdd_dev *camdd_dev, 505 struct camdd_dev *other_dev, 506 struct timespec *start_time); 507 int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, 508 uint64_t max_io, int retry_count, int timeout); 509 int camdd_parse_io_opts(char *args, int is_write, 510 struct camdd_io_opts *io_opts); 511 void usage(void); 512 513 /* 514 * Parse out a bus, or a bus, target and lun in the following 515 * format: 516 * bus 517 * bus:target 518 * bus:target:lun 519 * 520 * Returns the number of parsed components, or 0.
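 * For example, parsing "0:1:2" sets *bus = 0, *target = 1 and *lun = 2 and
 * returns 3, while "1" sets only *bus and returns 1.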
521 */ 522 static int 523 parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst) 524 { 525 char *tmpstr; 526 int convs = 0; 527 528 while (isspace(*tstr) && (*tstr != '\0')) 529 tstr++; 530 531 tmpstr = (char *)strtok(tstr, ":"); 532 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 533 *bus = strtol(tmpstr, NULL, 0); 534 *arglst |= CAMDD_ARG_BUS; 535 convs++; 536 tmpstr = (char *)strtok(NULL, ":"); 537 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 538 *target = strtol(tmpstr, NULL, 0); 539 *arglst |= CAMDD_ARG_TARGET; 540 convs++; 541 tmpstr = (char *)strtok(NULL, ":"); 542 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 543 *lun = strtol(tmpstr, NULL, 0); 544 *arglst |= CAMDD_ARG_LUN; 545 convs++; 546 } 547 } 548 } 549 550 return convs; 551 } 552 553 /* 554 * XXX KDM clean up and free all of the buffers on the queue! 555 */ 556 void 557 camdd_free_dev(struct camdd_dev *dev) 558 { 559 if (dev == NULL) 560 return; 561 562 switch (dev->dev_type) { 563 case CAMDD_DEV_FILE: { 564 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 565 566 if (file_dev->fd != -1) 567 close(file_dev->fd); 568 free(file_dev->tmp_buf); 569 break; 570 } 571 case CAMDD_DEV_PASS: { 572 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 573 574 if (pass_dev->dev != NULL) 575 cam_close_device(pass_dev->dev); 576 break; 577 } 578 default: 579 break; 580 } 581 582 free(dev); 583 } 584 585 struct camdd_dev * 586 camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, 587 int retry_count, int timeout) 588 { 589 struct camdd_dev *dev = NULL; 590 struct kevent *ke; 591 size_t ke_size; 592 int retval = 0; 593 594 dev = malloc(sizeof(*dev)); 595 if (dev == NULL) { 596 warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev)); 597 goto bailout; 598 } 599 600 bzero(dev, sizeof(*dev)); 601 602 dev->dev_type = dev_type; 603 dev->io_timeout = timeout; 604 dev->retry_count = retry_count; 605 STAILQ_INIT(&dev->free_queue); 606 STAILQ_INIT(&dev->free_indirect_queue); 607 STAILQ_INIT(&dev->active_queue); 608 STAILQ_INIT(&dev->pending_queue); 609 STAILQ_INIT(&dev->run_queue); 610 STAILQ_INIT(&dev->reorder_queue); 611 STAILQ_INIT(&dev->work_queue); 612 STAILQ_INIT(&dev->peer_done_queue); 613 STAILQ_INIT(&dev->peer_work_queue); 614 retval = pthread_mutex_init(&dev->mutex, NULL); 615 if (retval != 0) { 616 warnc(retval, "%s: failed to initialize mutex", __func__); 617 goto bailout; 618 } 619 620 retval = pthread_cond_init(&dev->cond, NULL); 621 if (retval != 0) { 622 warnc(retval, "%s: failed to initialize condition variable", 623 __func__); 624 goto bailout; 625 } 626 627 dev->kq = kqueue(); 628 if (dev->kq == -1) { 629 warn("%s: Unable to create kqueue", __func__); 630 goto bailout; 631 } 632 633 ke_size = sizeof(struct kevent) * (num_ke + 4); 634 ke = malloc(ke_size); 635 if (ke == NULL) { 636 warn("%s: unable to malloc %zu bytes", __func__, ke_size); 637 goto bailout; 638 } 639 bzero(ke, ke_size); 640 if (num_ke > 0) 641 bcopy(new_ke, ke, num_ke * sizeof(struct kevent)); 642 643 EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER, 644 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 645 EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER, 646 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 647 EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 648 EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 649 650 retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL); 651 if (retval == -1) { 652 warn("%s: Unable to register kevents", __func__); 653 goto 
bailout; 654 } 655 656 657 return (dev); 658 659 bailout: 660 free(dev); 661 662 return (NULL); 663 } 664 665 static struct camdd_buf * 666 camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 667 { 668 struct camdd_buf *buf = NULL; 669 uint8_t *data_ptr = NULL; 670 671 /* 672 * We only need to allocate data space for data buffers. 673 */ 674 switch (buf_type) { 675 case CAMDD_BUF_DATA: 676 data_ptr = malloc(dev->blocksize); 677 if (data_ptr == NULL) { 678 warn("unable to allocate %u bytes", dev->blocksize); 679 goto bailout_error; 680 } 681 break; 682 default: 683 break; 684 } 685 686 buf = malloc(sizeof(*buf)); 687 if (buf == NULL) { 688 warn("unable to allocate %zu bytes", sizeof(*buf)); 689 goto bailout_error; 690 } 691 692 bzero(buf, sizeof(*buf)); 693 buf->buf_type = buf_type; 694 buf->dev = dev; 695 switch (buf_type) { 696 case CAMDD_BUF_DATA: { 697 struct camdd_buf_data *data; 698 699 data = &buf->buf_type_spec.data; 700 701 data->alloc_len = dev->blocksize; 702 data->buf = data_ptr; 703 break; 704 } 705 case CAMDD_BUF_INDIRECT: 706 break; 707 default: 708 break; 709 } 710 STAILQ_INIT(&buf->src_list); 711 712 return (buf); 713 714 bailout_error: 715 if (data_ptr != NULL) 716 free(data_ptr); 717 718 if (buf != NULL) 719 free(buf); 720 721 return (NULL); 722 } 723 724 void 725 camdd_release_buf(struct camdd_buf *buf) 726 { 727 struct camdd_dev *dev; 728 729 dev = buf->dev; 730 731 switch (buf->buf_type) { 732 case CAMDD_BUF_DATA: { 733 struct camdd_buf_data *data; 734 735 data = &buf->buf_type_spec.data; 736 737 if (data->segs != NULL) { 738 if (data->extra_buf != 0) { 739 void *extra_buf; 740 741 extra_buf = (void *) 742 data->segs[data->sg_count - 1].ds_addr; 743 free(extra_buf); 744 data->extra_buf = 0; 745 } 746 free(data->segs); 747 data->segs = NULL; 748 data->sg_count = 0; 749 } else if (data->iovec != NULL) { 750 if (data->extra_buf != 0) { 751 free(data->iovec[data->sg_count - 1].iov_base); 752 data->extra_buf = 0; 753 } 754 free(data->iovec); 755 data->iovec = NULL; 756 data->sg_count = 0; 757 } 758 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 759 break; 760 } 761 case CAMDD_BUF_INDIRECT: 762 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links); 763 break; 764 default: 765 err(1, "%s: Invalid buffer type %d for released buffer", 766 __func__, buf->buf_type); 767 break; 768 } 769 } 770 771 struct camdd_buf * 772 camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 773 { 774 struct camdd_buf *buf = NULL; 775 776 switch (buf_type) { 777 case CAMDD_BUF_DATA: 778 buf = STAILQ_FIRST(&dev->free_queue); 779 if (buf != NULL) { 780 struct camdd_buf_data *data; 781 uint8_t *data_ptr; 782 uint32_t alloc_len; 783 784 STAILQ_REMOVE_HEAD(&dev->free_queue, links); 785 data = &buf->buf_type_spec.data; 786 data_ptr = data->buf; 787 alloc_len = data->alloc_len; 788 bzero(buf, sizeof(*buf)); 789 data->buf = data_ptr; 790 data->alloc_len = alloc_len; 791 } 792 break; 793 case CAMDD_BUF_INDIRECT: 794 buf = STAILQ_FIRST(&dev->free_indirect_queue); 795 if (buf != NULL) { 796 STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links); 797 798 bzero(buf, sizeof(*buf)); 799 } 800 break; 801 default: 802 warnx("Unknown buffer type %d requested", buf_type); 803 break; 804 } 805 806 807 if (buf == NULL) 808 return (camdd_alloc_buf(dev, buf_type)); 809 else { 810 STAILQ_INIT(&buf->src_list); 811 buf->dev = dev; 812 buf->buf_type = buf_type; 813 814 return (buf); 815 } 816 } 817 818 int 819 camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, 820 uint32_t 
*num_sectors_used, int *double_buf_needed) 821 { 822 struct camdd_buf *tmp_buf; 823 struct camdd_buf_data *data; 824 uint8_t *extra_buf = NULL; 825 size_t extra_buf_len = 0; 826 int i, retval = 0; 827 828 data = &buf->buf_type_spec.data; 829 830 data->sg_count = buf->src_count; 831 /* 832 * Compose a scatter/gather list from all of the buffers in the list. 833 * If the length of the buffer isn't a multiple of the sector size, 834 * we'll have to add an extra buffer. This should only happen 835 * at the end of a transfer. 836 */ 837 if ((data->fill_len % sector_size) != 0) { 838 extra_buf_len = sector_size - (data->fill_len % sector_size); 839 extra_buf = calloc(extra_buf_len, 1); 840 if (extra_buf == NULL) { 841 warn("%s: unable to allocate %zu bytes for extra " 842 "buffer space", __func__, extra_buf_len); 843 retval = 1; 844 goto bailout; 845 } 846 data->extra_buf = 1; 847 data->sg_count++; 848 } 849 if (iovec == 0) { 850 data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t)); 851 if (data->segs == NULL) { 852 warn("%s: unable to allocate %zu bytes for S/G list", 853 __func__, sizeof(bus_dma_segment_t) * 854 data->sg_count); 855 retval = 1; 856 goto bailout; 857 } 858 859 } else { 860 data->iovec = calloc(data->sg_count, sizeof(struct iovec)); 861 if (data->iovec == NULL) { 862 warn("%s: unable to allocate %zu bytes for S/G list", 863 __func__, sizeof(struct iovec) * data->sg_count); 864 retval = 1; 865 goto bailout; 866 } 867 } 868 869 for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list); 870 i < buf->src_count && tmp_buf != NULL; i++, 871 tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) { 872 873 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 874 struct camdd_buf_data *tmp_data; 875 876 tmp_data = &tmp_buf->buf_type_spec.data; 877 if (iovec == 0) { 878 data->segs[i].ds_addr = 879 (bus_addr_t) tmp_data->buf; 880 data->segs[i].ds_len = tmp_data->fill_len - 881 tmp_data->resid; 882 } else { 883 data->iovec[i].iov_base = tmp_data->buf; 884 data->iovec[i].iov_len = tmp_data->fill_len - 885 tmp_data->resid; 886 } 887 if (((tmp_data->fill_len - tmp_data->resid) % 888 sector_size) != 0) 889 *double_buf_needed = 1; 890 } else { 891 struct camdd_buf_indirect *tmp_ind; 892 893 tmp_ind = &tmp_buf->buf_type_spec.indirect; 894 if (iovec == 0) { 895 data->segs[i].ds_addr = 896 (bus_addr_t)tmp_ind->start_ptr; 897 data->segs[i].ds_len = tmp_ind->len; 898 } else { 899 data->iovec[i].iov_base = tmp_ind->start_ptr; 900 data->iovec[i].iov_len = tmp_ind->len; 901 } 902 if ((tmp_ind->len % sector_size) != 0) 903 *double_buf_needed = 1; 904 } 905 } 906 907 if (extra_buf != NULL) { 908 if (iovec == 0) { 909 data->segs[i].ds_addr = (bus_addr_t)extra_buf; 910 data->segs[i].ds_len = extra_buf_len; 911 } else { 912 data->iovec[i].iov_base = extra_buf; 913 data->iovec[i].iov_len = extra_buf_len; 914 } 915 i++; 916 } 917 if ((tmp_buf != NULL) || (i != data->sg_count)) { 918 warnx("buffer source count does not match " 919 "number of buffers in list!"); 920 retval = 1; 921 goto bailout; 922 } 923 924 bailout: 925 if (retval == 0) { 926 *num_sectors_used = (data->fill_len + extra_buf_len) / 927 sector_size; 928 } 929 return (retval); 930 } 931 932 uint32_t 933 camdd_buf_get_len(struct camdd_buf *buf) 934 { 935 uint32_t len = 0; 936 937 if (buf->buf_type != CAMDD_BUF_DATA) { 938 struct camdd_buf_indirect *indirect; 939 940 indirect = &buf->buf_type_spec.indirect; 941 len = indirect->len; 942 } else { 943 struct camdd_buf_data *data; 944 945 data = &buf->buf_type_spec.data; 946 len = data->fill_len; 947 } 948 949 return 
(len); 950 } 951 952 void 953 camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf) 954 { 955 struct camdd_buf_data *data; 956 957 assert(buf->buf_type == CAMDD_BUF_DATA); 958 959 data = &buf->buf_type_spec.data; 960 961 STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links); 962 buf->src_count++; 963 964 data->fill_len += camdd_buf_get_len(child_buf); 965 } 966 967 typedef enum { 968 CAMDD_TS_MAX_BLK, 969 CAMDD_TS_MIN_BLK, 970 CAMDD_TS_BLK_GRAN, 971 CAMDD_TS_EFF_IOSIZE 972 } camdd_status_item_index; 973 974 static struct camdd_status_items { 975 const char *name; 976 struct mt_status_entry *entry; 977 } req_status_items[] = { 978 { "max_blk", NULL }, 979 { "min_blk", NULL }, 980 { "blk_gran", NULL }, 981 { "max_effective_iosize", NULL } 982 }; 983 984 int 985 camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 986 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran) 987 { 988 struct mt_status_data status_data; 989 char *xml_str = NULL; 990 unsigned int i; 991 int retval = 0; 992 993 retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str); 994 if (retval != 0) 995 err(1, "Couldn't get XML string from %s", filename); 996 997 retval = mt_get_status(xml_str, &status_data); 998 if (retval != XML_STATUS_OK) { 999 warn("couldn't get status for %s", filename); 1000 retval = 1; 1001 goto bailout; 1002 } else 1003 retval = 0; 1004 1005 if (status_data.error != 0) { 1006 warnx("%s", status_data.error_str); 1007 retval = 1; 1008 goto bailout; 1009 } 1010 1011 for (i = 0; i < sizeof(req_status_items) / 1012 sizeof(req_status_items[0]); i++) { 1013 char *name; 1014 1015 name = __DECONST(char *, req_status_items[i].name); 1016 req_status_items[i].entry = mt_status_entry_find(&status_data, 1017 name); 1018 if (req_status_items[i].entry == NULL) { 1019 errx(1, "Cannot find status entry %s", 1020 req_status_items[i].name); 1021 } 1022 } 1023 1024 *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned; 1025 *max_blk= req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned; 1026 *min_blk= req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned; 1027 *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned; 1028 bailout: 1029 1030 free(xml_str); 1031 mt_status_free(&status_data); 1032 1033 return (retval); 1034 } 1035 1036 struct camdd_dev * 1037 camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, 1038 int timeout) 1039 { 1040 struct camdd_dev *dev = NULL; 1041 struct camdd_dev_file *file_dev; 1042 uint64_t blocksize = io_opts->blocksize; 1043 1044 dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout); 1045 if (dev == NULL) 1046 goto bailout; 1047 1048 file_dev = &dev->dev_spec.file; 1049 file_dev->fd = fd; 1050 strlcpy(file_dev->filename, io_opts->dev_name, 1051 sizeof(file_dev->filename)); 1052 strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name)); 1053 if (blocksize == 0) 1054 dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK; 1055 else 1056 dev->blocksize = blocksize; 1057 1058 if ((io_opts->queue_depth != 0) 1059 && (io_opts->queue_depth != 1)) { 1060 warnx("Queue depth %ju for %s ignored, only 1 outstanding " 1061 "command supported", (uintmax_t)io_opts->queue_depth, 1062 io_opts->dev_name); 1063 } 1064 dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH; 1065 dev->run = camdd_file_run; 1066 dev->fetch = NULL; 1067 1068 /* 1069 * We can effectively access files on byte boundaries. We'll reset 1070 * this for devices like disks that can be accessed on sector 1071 * boundaries. 
1072 */ 1073 dev->sector_size = 1; 1074 1075 if ((fd != STDIN_FILENO) 1076 && (fd != STDOUT_FILENO)) { 1077 int retval; 1078 1079 retval = fstat(fd, &file_dev->sb); 1080 if (retval != 0) { 1081 warn("Cannot stat %s", dev->device_name); 1082 goto bailout_error; 1085 } 1086 if (S_ISREG(file_dev->sb.st_mode)) { 1087 file_dev->file_type = CAMDD_FILE_REG; 1088 } else if (S_ISCHR(file_dev->sb.st_mode)) { 1089 int type; 1090 1091 if (ioctl(fd, FIODTYPE, &type) == -1) 1092 err(1, "FIODTYPE ioctl failed on %s", 1093 dev->device_name); 1094 else { 1095 if (type & D_TAPE) 1096 file_dev->file_type = CAMDD_FILE_TAPE; 1097 else if (type & D_DISK) 1098 file_dev->file_type = CAMDD_FILE_DISK; 1099 else if (type & D_MEM) 1100 file_dev->file_type = CAMDD_FILE_MEM; 1101 else if (type & D_TTY) 1102 file_dev->file_type = CAMDD_FILE_TTY; 1103 } 1104 } else if (S_ISDIR(file_dev->sb.st_mode)) { 1105 errx(1, "cannot operate on directory %s", 1106 dev->device_name); 1107 } else if (S_ISFIFO(file_dev->sb.st_mode)) { 1108 file_dev->file_type = CAMDD_FILE_PIPE; 1109 } else 1110 errx(1, "Cannot determine file type for %s", 1111 dev->device_name); 1112 1113 switch (file_dev->file_type) { 1114 case CAMDD_FILE_REG: 1115 if (file_dev->sb.st_size != 0) 1116 dev->max_sector = file_dev->sb.st_size - 1; 1117 else 1118 dev->max_sector = 0; 1119 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1120 break; 1121 case CAMDD_FILE_TAPE: { 1122 uint64_t max_iosize, max_blk, min_blk, blk_gran; 1123 /* 1124 * Check block limits and maximum effective iosize. 1125 * Make sure the blocksize is within the block 1126 * limits (and a multiple of the minimum blocksize) 1127 * and that the blocksize is <= maximum effective 1128 * iosize. 1129 */ 1130 retval = camdd_probe_tape(fd, dev->device_name, 1131 &max_iosize, &max_blk, &min_blk, &blk_gran); 1132 if (retval != 0) 1133 errx(1, "Unable to probe tape %s", 1134 dev->device_name); 1135 1136 /* 1137 * The blocksize needs to be <= the maximum 1138 * effective I/O size of the tape device. Note 1139 * that this also takes into account the maximum 1140 * blocksize reported by READ BLOCK LIMITS. 1141 */ 1142 if (dev->blocksize > max_iosize) { 1143 warnx("Blocksize %u too big for %s, limiting " 1144 "to %ju", dev->blocksize, dev->device_name, 1145 max_iosize); 1146 dev->blocksize = max_iosize; 1147 } 1148 1149 /* 1150 * The blocksize needs to be at least min_blk. 1151 */ 1152 if (dev->blocksize < min_blk) { 1153 warnx("Blocksize %u too small for %s, " 1154 "increasing to %ju", dev->blocksize, 1155 dev->device_name, min_blk); 1156 dev->blocksize = min_blk; 1157 } 1158 1159 /* 1160 * And the blocksize needs to be a multiple of 1161 * the block granularity. 1162 */ 1163 if ((blk_gran != 0) 1164 && (dev->blocksize % (1 << blk_gran))) { 1165 warnx("Blocksize %u for %s not a multiple of " 1166 "%d, adjusting to %d", dev->blocksize, 1167 dev->device_name, (1 << blk_gran), 1168 dev->blocksize & ~((1 << blk_gran) - 1)); 1169 dev->blocksize &= ~((1 << blk_gran) - 1); 1170 } 1171 1172 if (dev->blocksize == 0) { 1173 errx(1, "Unable to derive valid blocksize for " 1174 "%s", dev->device_name); 1175 } 1176 1177 /* 1178 * For tape drives, set the sector size to the 1179 * blocksize so that we make sure not to write 1180 * less than the blocksize out to the drive.
1181 */ 1182 dev->sector_size = dev->blocksize; 1183 break; 1184 } 1185 case CAMDD_FILE_DISK: { 1186 off_t media_size; 1187 unsigned int sector_size; 1188 1189 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1190 1191 if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) { 1192 err(1, "DIOCGSECTORSIZE ioctl failed on %s", 1193 dev->device_name); 1194 } 1195 1196 if (sector_size == 0) { 1197 errx(1, "DIOCGSECTORSIZE ioctl returned " 1198 "invalid sector size %u for %s", 1199 sector_size, dev->device_name); 1200 } 1201 1202 if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) { 1203 err(1, "DIOCGMEDIASIZE ioctl failed on %s", 1204 dev->device_name); 1205 } 1206 1207 if (media_size == 0) { 1208 errx(1, "DIOCGMEDIASIZE ioctl returned " 1209 "invalid media size %ju for %s", 1210 (uintmax_t)media_size, dev->device_name); 1211 } 1212 1213 if (dev->blocksize % sector_size) { 1214 errx(1, "%s blocksize %u not a multiple of " 1215 "sector size %u", dev->device_name, 1216 dev->blocksize, sector_size); 1217 } 1218 1219 dev->sector_size = sector_size; 1220 dev->max_sector = (media_size / sector_size) - 1; 1221 break; 1222 } 1223 case CAMDD_FILE_MEM: 1224 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1225 break; 1226 default: 1227 break; 1228 } 1229 } 1230 1231 if ((io_opts->offset != 0) 1232 && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) { 1233 warnx("Offset %ju specified for %s, but we cannot seek on %s", 1234 io_opts->offset, io_opts->dev_name, io_opts->dev_name); 1235 goto bailout_error; 1236 } 1237 #if 0 1238 else if ((io_opts->offset != 0) 1239 && ((io_opts->offset % dev->sector_size) != 0)) { 1240 warnx("Offset %ju for %s is not a multiple of the " 1241 "sector size %u", io_opts->offset, 1242 io_opts->dev_name, dev->sector_size); 1243 goto bailout_error; 1244 } else { 1245 dev->start_offset_bytes = io_opts->offset; 1246 } 1247 #endif 1248 1249 bailout: 1250 return (dev); 1251 1252 bailout_error: 1253 camdd_free_dev(dev); 1254 return (NULL); 1255 } 1256 1257 /* 1258 * Need to implement this. Do a basic probe: 1259 * - Check the inquiry data, make sure we're talking to a device that we 1260 * can reasonably expect to talk to -- direct, RBC, CD, WORM. 1261 * - Send a test unit ready, make sure the device is available. 1262 * - Get the capacity and block size. 1263 */ 1264 struct camdd_dev * 1265 camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, 1266 camdd_argmask arglist, int probe_retry_count, 1267 int probe_timeout, int io_retry_count, int io_timeout) 1268 { 1269 union ccb *ccb; 1270 uint64_t maxsector; 1271 uint32_t cpi_maxio, max_iosize, pass_numblocks; 1272 uint32_t block_len; 1273 struct scsi_read_capacity_data rcap; 1274 struct scsi_read_capacity_data_long rcaplong; 1275 struct camdd_dev *dev; 1276 struct camdd_dev_pass *pass_dev; 1277 struct kevent ke; 1278 int scsi_dev_type; 1279 int retval; 1280 1281 dev = NULL; 1282 1283 scsi_dev_type = SID_TYPE(&cam_dev->inq_data); 1284 maxsector = 0; 1285 block_len = 0; 1286 1287 /* 1288 * For devices that support READ CAPACITY, we'll attempt to get the 1289 * capacity. Otherwise, we really don't support tape or other 1290 * devices via SCSI passthrough, so just return an error in that case.
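 * Direct access (T_DIRECT), WORM, CD-ROM, optical and RBC peripheral types
 * are accepted below; any other type causes camdd to exit with an error.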
1291 */ 1292 switch (scsi_dev_type) { 1293 case T_DIRECT: 1294 case T_WORM: 1295 case T_CDROM: 1296 case T_OPTICAL: 1297 case T_RBC: 1298 break; 1299 default: 1300 errx(1, "Unsupported SCSI device type %d", scsi_dev_type); 1301 break; /*NOTREACHED*/ 1302 } 1303 1304 ccb = cam_getccb(cam_dev); 1305 1306 if (ccb == NULL) { 1307 warnx("%s: error allocating ccb", __func__); 1308 goto bailout; 1309 } 1310 1311 bzero(&(&ccb->ccb_h)[1], 1312 sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); 1313 1314 scsi_read_capacity(&ccb->csio, 1315 /*retries*/ probe_retry_count, 1316 /*cbfcnp*/ NULL, 1317 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1318 &rcap, 1319 SSD_FULL_SIZE, 1320 /*timeout*/ probe_timeout ? probe_timeout : 5000); 1321 1322 /* Disable freezing the device queue */ 1323 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1324 1325 if (arglist & CAMDD_ARG_ERR_RECOVER) 1326 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1327 1328 if (cam_send_ccb(cam_dev, ccb) < 0) { 1329 warn("error sending READ CAPACITY command"); 1330 1331 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1332 CAM_EPF_ALL, stderr); 1333 1334 goto bailout; 1335 } 1336 1337 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1338 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1339 retval = 1; 1340 goto bailout; 1341 } 1342 1343 maxsector = scsi_4btoul(rcap.addr); 1344 block_len = scsi_4btoul(rcap.length); 1345 1346 /* 1347 * A last block of 2^32-1 means that the true capacity is over 2TB, 1348 * and we need to issue the long READ CAPACITY to get the real 1349 * capacity. Otherwise, we're all set. 1350 */ 1351 if (maxsector != 0xffffffff) 1352 goto rcap_done; 1353 1354 scsi_read_capacity_16(&ccb->csio, 1355 /*retries*/ probe_retry_count, 1356 /*cbfcnp*/ NULL, 1357 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1358 /*lba*/ 0, 1359 /*reladdr*/ 0, 1360 /*pmi*/ 0, 1361 (uint8_t *)&rcaplong, 1362 sizeof(rcaplong), 1363 /*sense_len*/ SSD_FULL_SIZE, 1364 /*timeout*/ probe_timeout ? 
probe_timeout : 5000); 1365 1366 /* Disable freezing the device queue */ 1367 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1368 1369 if (arglist & CAMDD_ARG_ERR_RECOVER) 1370 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1371 1372 if (cam_send_ccb(cam_dev, ccb) < 0) { 1373 warn("error sending READ CAPACITY (16) command"); 1374 1375 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1376 CAM_EPF_ALL, stderr); 1377 1378 retval = 1; 1379 goto bailout; 1380 } 1381 1382 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1383 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1384 goto bailout; 1385 } 1386 1387 maxsector = scsi_8btou64(rcaplong.addr); 1388 block_len = scsi_4btoul(rcaplong.length); 1389 1390 rcap_done: 1391 1392 bzero(&(&ccb->ccb_h)[1], 1393 sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr)); 1394 1395 ccb->ccb_h.func_code = XPT_PATH_INQ; 1396 ccb->ccb_h.flags = CAM_DIR_NONE; 1397 ccb->ccb_h.retry_count = 1; 1398 1399 if (cam_send_ccb(cam_dev, ccb) < 0) { 1400 warn("error sending XPT_PATH_INQ CCB"); 1401 1402 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1403 CAM_EPF_ALL, stderr); 1404 goto bailout; 1405 } 1406 1407 EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0); 1408 1409 dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count, 1410 io_timeout); 1411 if (dev == NULL) 1412 goto bailout; 1413 1414 pass_dev = &dev->dev_spec.pass; 1415 pass_dev->scsi_dev_type = scsi_dev_type; 1416 pass_dev->dev = cam_dev; 1417 pass_dev->max_sector = maxsector; 1418 pass_dev->block_len = block_len; 1419 pass_dev->cpi_maxio = ccb->cpi.maxio; 1420 snprintf(dev->device_name, sizeof(dev->device_name), "%s%u", 1421 pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); 1422 dev->sector_size = block_len; 1423 dev->max_sector = maxsector; 1424 1425 1426 /* 1427 * Determine the optimal blocksize to use for this device. 1428 */ 1429 1430 /* 1431 * If the controller has not specified a maximum I/O size, 1432 * just go with 128K as a somewhat conservative value. 1433 */ 1434 if (pass_dev->cpi_maxio == 0) 1435 cpi_maxio = 131072; 1436 else 1437 cpi_maxio = pass_dev->cpi_maxio; 1438 1439 /* 1440 * If the controller has a large maximum I/O size, limit it 1441 * to something smaller so that the kernel doesn't have trouble 1442 * allocating buffers to copy data in and out for us. 1443 * XXX KDM this is until we have unmapped I/O support in the kernel. 1444 */ 1445 max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK); 1446 1447 /* 1448 * If we weren't able to get a block size for some reason, 1449 * default to 512 bytes. 1450 */ 1451 block_len = pass_dev->block_len; 1452 if (block_len == 0) 1453 block_len = 512; 1454 1455 /* 1456 * Figure out how many blocksize chunks will fit in the 1457 * maximum I/O size. 
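 * For example, with a 512-byte block length and a 1MB effective maximum
 * I/O size, this works out to 2048 blocks and a default 1MB transfer size.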
1458 */ 1459 pass_numblocks = max_iosize / block_len; 1460 1461 /* 1462 * And finally, multiple the number of blocks by the LBA 1463 * length to get our maximum block size; 1464 */ 1465 dev->blocksize = pass_numblocks * block_len; 1466 1467 if (io_opts->blocksize != 0) { 1468 if ((io_opts->blocksize % dev->sector_size) != 0) { 1469 warnx("Blocksize %ju for %s is not a multiple of " 1470 "sector size %u", (uintmax_t)io_opts->blocksize, 1471 dev->device_name, dev->sector_size); 1472 goto bailout_error; 1473 } 1474 dev->blocksize = io_opts->blocksize; 1475 } 1476 dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH; 1477 if (io_opts->queue_depth != 0) 1478 dev->target_queue_depth = io_opts->queue_depth; 1479 1480 if (io_opts->offset != 0) { 1481 if (io_opts->offset > (dev->max_sector * dev->sector_size)) { 1482 warnx("Offset %ju is past the end of device %s", 1483 io_opts->offset, dev->device_name); 1484 goto bailout_error; 1485 } 1486 #if 0 1487 else if ((io_opts->offset % dev->sector_size) != 0) { 1488 warnx("Offset %ju for %s is not a multiple of the " 1489 "sector size %u", io_opts->offset, 1490 dev->device_name, dev->sector_size); 1491 goto bailout_error; 1492 } 1493 dev->start_offset_bytes = io_opts->offset; 1494 #endif 1495 } 1496 1497 dev->min_cmd_size = io_opts->min_cmd_size; 1498 1499 dev->run = camdd_pass_run; 1500 dev->fetch = camdd_pass_fetch; 1501 1502 bailout: 1503 cam_freeccb(ccb); 1504 1505 return (dev); 1506 1507 bailout_error: 1508 cam_freeccb(ccb); 1509 1510 camdd_free_dev(dev); 1511 1512 return (NULL); 1513 } 1514 1515 void * 1516 camdd_worker(void *arg) 1517 { 1518 struct camdd_dev *dev = arg; 1519 struct camdd_buf *buf; 1520 struct timespec ts, *kq_ts; 1521 1522 ts.tv_sec = 0; 1523 ts.tv_nsec = 0; 1524 1525 pthread_mutex_lock(&dev->mutex); 1526 1527 dev->flags |= CAMDD_DEV_FLAG_ACTIVE; 1528 1529 for (;;) { 1530 struct kevent ke; 1531 int retval = 0; 1532 1533 /* 1534 * XXX KDM check the reorder queue depth? 1535 */ 1536 if (dev->write_dev == 0) { 1537 uint32_t our_depth, peer_depth, peer_bytes, our_bytes; 1538 uint32_t target_depth = dev->target_queue_depth; 1539 uint32_t peer_target_depth = 1540 dev->peer_dev->target_queue_depth; 1541 uint32_t peer_blocksize = dev->peer_dev->blocksize; 1542 1543 camdd_get_depth(dev, &our_depth, &peer_depth, 1544 &our_bytes, &peer_bytes); 1545 1546 #if 0 1547 while (((our_depth < target_depth) 1548 && (peer_depth < peer_target_depth)) 1549 || ((peer_bytes + our_bytes) < 1550 (peer_blocksize * 2))) { 1551 #endif 1552 while (((our_depth + peer_depth) < 1553 (target_depth + peer_target_depth)) 1554 || ((peer_bytes + our_bytes) < 1555 (peer_blocksize * 3))) { 1556 1557 retval = camdd_queue(dev, NULL); 1558 if (retval == 1) 1559 break; 1560 else if (retval != 0) { 1561 error_exit = 1; 1562 goto bailout; 1563 } 1564 1565 camdd_get_depth(dev, &our_depth, &peer_depth, 1566 &our_bytes, &peer_bytes); 1567 } 1568 } 1569 /* 1570 * See if we have any I/O that is ready to execute. 1571 */ 1572 buf = STAILQ_FIRST(&dev->run_queue); 1573 if (buf != NULL) { 1574 while (dev->target_queue_depth > dev->cur_active_io) { 1575 retval = dev->run(dev); 1576 if (retval == -1) { 1577 dev->flags |= CAMDD_DEV_FLAG_EOF; 1578 error_exit = 1; 1579 break; 1580 } else if (retval != 0) { 1581 break; 1582 } 1583 } 1584 } 1585 1586 /* 1587 * We've reached EOF, or our partner has reached EOF. 
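 * A writer may exit once its work queue, run queue and active I/O count
 * have all drained.  A reader exits as soon as the writer reports EOF; if
 * the reader itself hit EOF, it waits until everything it has queued to
 * the writer has been completed.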
1588 */ 1589 if ((dev->flags & CAMDD_DEV_FLAG_EOF) 1590 || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) { 1591 if (dev->write_dev != 0) { 1592 if ((STAILQ_EMPTY(&dev->work_queue)) 1593 && (dev->num_run_queue == 0) 1594 && (dev->cur_active_io == 0)) { 1595 goto bailout; 1596 } 1597 } else { 1598 /* 1599 * If we're the reader, and the writer 1600 * got EOF, he is already done. If we got 1601 * the EOF, then we need to wait until 1602 * everything is flushed out for the writer. 1603 */ 1604 if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) { 1605 goto bailout; 1606 } else if ((dev->num_peer_work_queue == 0) 1607 && (dev->num_peer_done_queue == 0) 1608 && (dev->cur_active_io == 0) 1609 && (dev->num_run_queue == 0)) { 1610 goto bailout; 1611 } 1612 } 1613 /* 1614 * XXX KDM need to do something about the pending 1615 * queue and cleanup resources. 1616 */ 1617 } 1618 1619 if ((dev->write_dev == 0) 1620 && (dev->cur_active_io == 0) 1621 && (dev->peer_bytes_queued < dev->peer_dev->blocksize)) 1622 kq_ts = &ts; 1623 else 1624 kq_ts = NULL; 1625 1626 /* 1627 * Run kevent to see if there are events to process. 1628 */ 1629 pthread_mutex_unlock(&dev->mutex); 1630 retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts); 1631 pthread_mutex_lock(&dev->mutex); 1632 if (retval == -1) { 1633 warn("%s: error returned from kevent",__func__); 1634 goto bailout; 1635 } else if (retval != 0) { 1636 switch (ke.filter) { 1637 case EVFILT_READ: 1638 if (dev->fetch != NULL) { 1639 retval = dev->fetch(dev); 1640 if (retval == -1) { 1641 error_exit = 1; 1642 goto bailout; 1643 } 1644 } 1645 break; 1646 case EVFILT_SIGNAL: 1647 /* 1648 * We register for this so we don't get 1649 * an error as a result of a SIGINFO or a 1650 * SIGINT. It will actually get handled 1651 * by the signal handler. If we get a 1652 * SIGINT, bail out without printing an 1653 * error message. Any other signals 1654 * will result in the error message above. 1655 */ 1656 if (ke.ident == SIGINT) 1657 goto bailout; 1658 break; 1659 case EVFILT_USER: 1660 retval = 0; 1661 /* 1662 * Check to see if the other thread has 1663 * queued any I/O for us to do. (In this 1664 * case we're the writer.) 1665 */ 1666 for (buf = STAILQ_FIRST(&dev->work_queue); 1667 buf != NULL; 1668 buf = STAILQ_FIRST(&dev->work_queue)) { 1669 STAILQ_REMOVE_HEAD(&dev->work_queue, 1670 work_links); 1671 retval = camdd_queue(dev, buf); 1672 /* 1673 * We keep going unless we get an 1674 * actual error. If we get EOF, we 1675 * still want to remove the buffers 1676 * from the queue and send the back 1677 * to the reader thread. 1678 */ 1679 if (retval == -1) { 1680 error_exit = 1; 1681 goto bailout; 1682 } else 1683 retval = 0; 1684 } 1685 1686 /* 1687 * Next check to see if the other thread has 1688 * queued any completed buffers back to us. 1689 * (In this case we're the reader.) 1690 */ 1691 for (buf = STAILQ_FIRST(&dev->peer_done_queue); 1692 buf != NULL; 1693 buf = STAILQ_FIRST(&dev->peer_done_queue)){ 1694 STAILQ_REMOVE_HEAD( 1695 &dev->peer_done_queue, work_links); 1696 dev->num_peer_done_queue--; 1697 camdd_peer_done(buf); 1698 } 1699 break; 1700 default: 1701 warnx("%s: unknown kevent filter %d", 1702 __func__, ke.filter); 1703 break; 1704 } 1705 } 1706 } 1707 1708 bailout: 1709 1710 dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE; 1711 1712 /* XXX KDM cleanup resources here? */ 1713 1714 pthread_mutex_unlock(&dev->mutex); 1715 1716 need_exit = 1; 1717 sem_post(&camdd_sem); 1718 1719 return (NULL); 1720 } 1721 1722 /* 1723 * Simplistic translation of CCB status to our local status. 
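 * CAM_REQ_CMP with no residual maps to OK, a partial residual maps to
 * SHORT_IO, and a transfer that moved no data maps to EOF.  Benign SCSI
 * statuses (OK, condition met, intermediate) also map to OK; any other
 * SCSI or CAM status is reported as an error.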
1724 */ 1725 camdd_buf_status 1726 camdd_ccb_status(union ccb *ccb) 1727 { 1728 camdd_buf_status status = CAMDD_STATUS_NONE; 1729 cam_status ccb_status; 1730 1731 ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; 1732 1733 switch (ccb_status) { 1734 case CAM_REQ_CMP: { 1735 if (ccb->csio.resid == 0) { 1736 status = CAMDD_STATUS_OK; 1737 } else if (ccb->csio.dxfer_len > ccb->csio.resid) { 1738 status = CAMDD_STATUS_SHORT_IO; 1739 } else { 1740 status = CAMDD_STATUS_EOF; 1741 } 1742 break; 1743 } 1744 case CAM_SCSI_STATUS_ERROR: { 1745 switch (ccb->csio.scsi_status) { 1746 case SCSI_STATUS_OK: 1747 case SCSI_STATUS_COND_MET: 1748 case SCSI_STATUS_INTERMED: 1749 case SCSI_STATUS_INTERMED_COND_MET: 1750 status = CAMDD_STATUS_OK; 1751 break; 1752 case SCSI_STATUS_CMD_TERMINATED: 1753 case SCSI_STATUS_CHECK_COND: 1754 case SCSI_STATUS_QUEUE_FULL: 1755 case SCSI_STATUS_BUSY: 1756 case SCSI_STATUS_RESERV_CONFLICT: 1757 default: 1758 status = CAMDD_STATUS_ERROR; 1759 break; 1760 } 1761 break; 1762 } 1763 default: 1764 status = CAMDD_STATUS_ERROR; 1765 break; 1766 } 1767 1768 return (status); 1769 } 1770 1771 /* 1772 * Queue a buffer to our peer's work thread for writing. 1773 * 1774 * Returns 0 for success, -1 for failure, 1 if the other thread exited. 1775 */ 1776 int 1777 camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf) 1778 { 1779 struct kevent ke; 1780 STAILQ_HEAD(, camdd_buf) local_queue; 1781 struct camdd_buf *buf1, *buf2; 1782 struct camdd_buf_data *data = NULL; 1783 uint64_t peer_bytes_queued = 0; 1784 int active = 1; 1785 int retval = 0; 1786 1787 STAILQ_INIT(&local_queue); 1788 1789 /* 1790 * Since we're the reader, we need to queue our I/O to the writer 1791 * in sequential order in order to make sure it gets written out 1792 * in sequential order. 1793 * 1794 * Check the next expected I/O starting offset. If this doesn't 1795 * match, put it on the reorder queue. 1796 */ 1797 if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) { 1798 1799 /* 1800 * If there is nothing on the queue, there is no sorting 1801 * needed. 1802 */ 1803 if (STAILQ_EMPTY(&dev->reorder_queue)) { 1804 STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); 1805 dev->num_reorder_queue++; 1806 goto bailout; 1807 } 1808 1809 /* 1810 * Sort in ascending order by starting LBA. There should 1811 * be no identical LBAs. 1812 */ 1813 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1814 buf1 = buf2) { 1815 buf2 = STAILQ_NEXT(buf1, links); 1816 if (buf->lba < buf1->lba) { 1817 /* 1818 * If we're less than the first one, then 1819 * we insert at the head of the list 1820 * because this has to be the first element 1821 * on the list. 1822 */ 1823 STAILQ_INSERT_HEAD(&dev->reorder_queue, 1824 buf, links); 1825 dev->num_reorder_queue++; 1826 break; 1827 } else if (buf->lba > buf1->lba) { 1828 if (buf2 == NULL) { 1829 STAILQ_INSERT_TAIL(&dev->reorder_queue, 1830 buf, links); 1831 dev->num_reorder_queue++; 1832 break; 1833 } else if (buf->lba < buf2->lba) { 1834 STAILQ_INSERT_AFTER(&dev->reorder_queue, 1835 buf1, buf, links); 1836 dev->num_reorder_queue++; 1837 break; 1838 } 1839 } else { 1840 errx(1, "Found buffers with duplicate LBA %ju!", 1841 buf->lba); 1842 } 1843 } 1844 goto bailout; 1845 } else { 1846 1847 /* 1848 * We're the next expected I/O completion, so put ourselves 1849 * on the local queue to be sent to the writer. We use 1850 * work_links here so that we can queue this to the 1851 * peer_work_queue before taking the buffer off of the 1852 * local_queue. 
1853 */ 1854 dev->next_completion_pos_bytes += buf->len; 1855 STAILQ_INSERT_TAIL(&local_queue, buf, work_links); 1856 1857 /* 1858 * Go through the reorder queue looking for more sequential 1859 * I/O and add it to the local queue. 1860 */ 1861 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1862 buf1 = STAILQ_FIRST(&dev->reorder_queue)) { 1863 /* 1864 * As soon as we see an I/O that is out of sequence, 1865 * we're done. 1866 */ 1867 if ((buf1->lba * dev->sector_size) != 1868 dev->next_completion_pos_bytes) 1869 break; 1870 1871 STAILQ_REMOVE_HEAD(&dev->reorder_queue, links); 1872 dev->num_reorder_queue--; 1873 STAILQ_INSERT_TAIL(&local_queue, buf1, work_links); 1874 dev->next_completion_pos_bytes += buf1->len; 1875 } 1876 } 1877 1878 /* 1879 * Setup the event to let the other thread know that it has work 1880 * pending. 1881 */ 1882 EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0, 1883 NOTE_TRIGGER, 0, NULL); 1884 1885 /* 1886 * Put this on our shadow queue so that we know what we've queued 1887 * to the other thread. 1888 */ 1889 STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) { 1890 if (buf1->buf_type != CAMDD_BUF_DATA) { 1891 errx(1, "%s: should have a data buffer, not an " 1892 "indirect buffer", __func__); 1893 } 1894 data = &buf1->buf_type_spec.data; 1895 1896 /* 1897 * We only need to send one EOF to the writer, and don't 1898 * need to continue sending EOFs after that. 1899 */ 1900 if (buf1->status == CAMDD_STATUS_EOF) { 1901 if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) { 1902 STAILQ_REMOVE(&local_queue, buf1, camdd_buf, 1903 work_links); 1904 camdd_release_buf(buf1); 1905 retval = 1; 1906 continue; 1907 } 1908 dev->flags |= CAMDD_DEV_FLAG_EOF_SENT; 1909 } 1910 1911 1912 STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links); 1913 peer_bytes_queued += (data->fill_len - data->resid); 1914 dev->peer_bytes_queued += (data->fill_len - data->resid); 1915 dev->num_peer_work_queue++; 1916 } 1917 1918 if (STAILQ_FIRST(&local_queue) == NULL) 1919 goto bailout; 1920 1921 /* 1922 * Drop our mutex and pick up the other thread's mutex. We need to 1923 * do this to avoid deadlocks. 1924 */ 1925 pthread_mutex_unlock(&dev->mutex); 1926 pthread_mutex_lock(&dev->peer_dev->mutex); 1927 1928 if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) { 1929 /* 1930 * Put the buffers on the other thread's incoming work queue. 1931 */ 1932 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 1933 buf1 = STAILQ_FIRST(&local_queue)) { 1934 STAILQ_REMOVE_HEAD(&local_queue, work_links); 1935 STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1, 1936 work_links); 1937 } 1938 /* 1939 * Send an event to the other thread's kqueue to let it know 1940 * that there is something on the work queue. 1941 */ 1942 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 1943 if (retval == -1) 1944 warn("%s: unable to add peer work_queue kevent", 1945 __func__); 1946 else 1947 retval = 0; 1948 } else 1949 active = 0; 1950 1951 pthread_mutex_unlock(&dev->peer_dev->mutex); 1952 pthread_mutex_lock(&dev->mutex); 1953 1954 /* 1955 * If the other side isn't active, run through the queue and 1956 * release all of the buffers. 
1957 */ 1958 if (active == 0) { 1959 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 1960 buf1 = STAILQ_FIRST(&local_queue)) { 1961 STAILQ_REMOVE_HEAD(&local_queue, work_links); 1962 STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf, 1963 links); 1964 dev->num_peer_work_queue--; 1965 camdd_release_buf(buf1); 1966 } 1967 dev->peer_bytes_queued -= peer_bytes_queued; 1968 retval = 1; 1969 } 1970 1971 bailout: 1972 return (retval); 1973 } 1974 1975 /* 1976 * Return a buffer to the reader thread when we have completed writing it. 1977 */ 1978 int 1979 camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf) 1980 { 1981 struct kevent ke; 1982 int retval = 0; 1983 1984 /* 1985 * Setup the event to let the other thread know that we have 1986 * completed a buffer. 1987 */ 1988 EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0, 1989 NOTE_TRIGGER, 0, NULL); 1990 1991 /* 1992 * Drop our lock and acquire the other thread's lock before 1993 * manipulating 1994 */ 1995 pthread_mutex_unlock(&dev->mutex); 1996 pthread_mutex_lock(&dev->peer_dev->mutex); 1997 1998 /* 1999 * Put the buffer on the reader thread's peer done queue now that 2000 * we have completed it. 2001 */ 2002 STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf, 2003 work_links); 2004 dev->peer_dev->num_peer_done_queue++; 2005 2006 /* 2007 * Send an event to the peer thread to let it know that we've added 2008 * something to its peer done queue. 2009 */ 2010 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 2011 if (retval == -1) 2012 warn("%s: unable to add peer_done_queue kevent", __func__); 2013 else 2014 retval = 0; 2015 2016 /* 2017 * Drop the other thread's lock and reacquire ours. 2018 */ 2019 pthread_mutex_unlock(&dev->peer_dev->mutex); 2020 pthread_mutex_lock(&dev->mutex); 2021 2022 return (retval); 2023 } 2024 2025 /* 2026 * Free a buffer that was written out by the writer thread and returned to 2027 * the reader thread. 2028 */ 2029 void 2030 camdd_peer_done(struct camdd_buf *buf) 2031 { 2032 struct camdd_dev *dev; 2033 struct camdd_buf_data *data; 2034 2035 dev = buf->dev; 2036 if (buf->buf_type != CAMDD_BUF_DATA) { 2037 errx(1, "%s: should have a data buffer, not an " 2038 "indirect buffer", __func__); 2039 } 2040 2041 data = &buf->buf_type_spec.data; 2042 2043 STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links); 2044 dev->num_peer_work_queue--; 2045 dev->peer_bytes_queued -= (data->fill_len - data->resid); 2046 2047 if (buf->status == CAMDD_STATUS_EOF) 2048 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2049 2050 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2051 } 2052 2053 /* 2054 * Assumes caller holds the lock for this device. 2055 */ 2056 void 2057 camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 2058 int *error_count) 2059 { 2060 int retval = 0; 2061 2062 /* 2063 * If we're the reader, we need to send the completed I/O 2064 * to the writer. If we're the writer, we need to just 2065 * free up resources, or let the reader know if we've 2066 * encountered an error. 
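 * On the writer side, each indirect piece on the src_list goes back to the
 * free indirect queue, and the reader's original data buffer is returned
 * via camdd_complete_peer_buf() once its reference count drops to zero.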
2067 */ 2068 if (dev->write_dev == 0) { 2069 retval = camdd_queue_peer_buf(dev, buf); 2070 if (retval != 0) 2071 (*error_count)++; 2072 } else { 2073 struct camdd_buf *tmp_buf, *next_buf; 2074 2075 STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links, 2076 next_buf) { 2077 struct camdd_buf *src_buf; 2078 struct camdd_buf_indirect *indirect; 2079 2080 STAILQ_REMOVE(&buf->src_list, tmp_buf, 2081 camdd_buf, src_links); 2082 2083 tmp_buf->status = buf->status; 2084 2085 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 2086 camdd_complete_peer_buf(dev, tmp_buf); 2087 continue; 2088 } 2089 2090 indirect = &tmp_buf->buf_type_spec.indirect; 2091 src_buf = indirect->src_buf; 2092 src_buf->refcount--; 2093 /* 2094 * XXX KDM we probably need to account for 2095 * exactly how many bytes we were able to 2096 * write. Allocate the residual to the 2097 * first N buffers? Or just track the 2098 * number of bytes written? Right now the reader 2099 * doesn't do anything with a residual. 2100 */ 2101 src_buf->status = buf->status; 2102 if (src_buf->refcount <= 0) 2103 camdd_complete_peer_buf(dev, src_buf); 2104 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, 2105 tmp_buf, links); 2106 } 2107 2108 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2109 } 2110 } 2111 2112 /* 2113 * Fetch all completed commands from the pass(4) device. 2114 * 2115 * Returns the number of commands received, or -1 if any of the commands 2116 * completed with an error. Returns 0 if no commands are available. 2117 */ 2118 int 2119 camdd_pass_fetch(struct camdd_dev *dev) 2120 { 2121 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 2122 union ccb ccb; 2123 int retval = 0, num_fetched = 0, error_count = 0; 2124 2125 pthread_mutex_unlock(&dev->mutex); 2126 /* 2127 * XXX KDM we don't distinguish between EFAULT and ENOENT. 2128 */ 2129 while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) { 2130 struct camdd_buf *buf; 2131 struct camdd_buf_data *data; 2132 cam_status ccb_status; 2133 union ccb *buf_ccb; 2134 2135 buf = ccb.ccb_h.ccb_buf; 2136 data = &buf->buf_type_spec.data; 2137 buf_ccb = &data->ccb; 2138 2139 num_fetched++; 2140 2141 /* 2142 * Copy the CCB back out so we get status, sense data, etc. 2143 */ 2144 bcopy(&ccb, buf_ccb, sizeof(ccb)); 2145 2146 pthread_mutex_lock(&dev->mutex); 2147 2148 /* 2149 * We're now done, so take this off the active queue. 2150 */ 2151 STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links); 2152 dev->cur_active_io--; 2153 2154 ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK; 2155 if (ccb_status != CAM_REQ_CMP) { 2156 cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL, 2157 CAM_EPF_ALL, stderr); 2158 } 2159 2160 data->resid = ccb.csio.resid; 2161 dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); 2162 2163 if (buf->status == CAMDD_STATUS_NONE) 2164 buf->status = camdd_ccb_status(&ccb); 2165 if (buf->status == CAMDD_STATUS_ERROR) 2166 error_count++; 2167 else if (buf->status == CAMDD_STATUS_EOF) { 2168 /* 2169 * Once we queue this buffer to our partner thread, 2170 * he will know that we've hit EOF. 2171 */ 2172 dev->flags |= CAMDD_DEV_FLAG_EOF; 2173 } 2174 2175 camdd_complete_buf(dev, buf, &error_count); 2176 2177 /* 2178 * Unlock in preparation for the ioctl call. 2179 */ 2180 pthread_mutex_unlock(&dev->mutex); 2181 } 2182 2183 pthread_mutex_lock(&dev->mutex); 2184 2185 if (error_count > 0) 2186 return (-1); 2187 else 2188 return (num_fetched); 2189 } 2190 2191 /* 2192 * Returns -1 for error, 0 for success/continue, and 1 for resource 2193 * shortage/stop processing. 
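 * When writing, if the pieces making up an I/O are not all multiples of
 * the sector size, they are first coalesced into a single temporary buffer
 * so that the device sees one sector-aligned write.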
2194 */ 2195 int 2196 camdd_file_run(struct camdd_dev *dev) 2197 { 2198 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 2199 struct camdd_buf_data *data; 2200 struct camdd_buf *buf; 2201 off_t io_offset; 2202 int retval = 0, write_dev = dev->write_dev; 2203 int error_count = 0, no_resources = 0, double_buf_needed = 0; 2204 uint32_t num_sectors = 0, db_len = 0; 2205 2206 buf = STAILQ_FIRST(&dev->run_queue); 2207 if (buf == NULL) { 2208 no_resources = 1; 2209 goto bailout; 2210 } else if ((dev->write_dev == 0) 2211 && (dev->flags & (CAMDD_DEV_FLAG_EOF | 2212 CAMDD_DEV_FLAG_EOF_SENT))) { 2213 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2214 dev->num_run_queue--; 2215 buf->status = CAMDD_STATUS_EOF; 2216 error_count++; 2217 goto bailout; 2218 } 2219 2220 /* 2221 * If we're writing, we need to go through the source buffer list 2222 * and create an S/G list. 2223 */ 2224 if (write_dev != 0) { 2225 retval = camdd_buf_sg_create(buf, /*iovec*/ 1, 2226 dev->sector_size, &num_sectors, &double_buf_needed); 2227 if (retval != 0) { 2228 no_resources = 1; 2229 goto bailout; 2230 } 2231 } 2232 2233 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2234 dev->num_run_queue--; 2235 2236 data = &buf->buf_type_spec.data; 2237 2238 /* 2239 * pread(2) and pwrite(2) offsets are byte offsets. 2240 */ 2241 io_offset = buf->lba * dev->sector_size; 2242 2243 /* 2244 * Unlock the mutex while we read or write. 2245 */ 2246 pthread_mutex_unlock(&dev->mutex); 2247 2248 /* 2249 * Note that we don't need to double buffer if we're the reader 2250 * because in that case, we have allocated a single buffer of 2251 * sufficient size to do the read. This copy is necessary on 2252 * writes because if one of the components of the S/G list is not 2253 * a sector size multiple, the kernel will reject the write. This 2254 * is unfortunate but not surprising. So this will make sure that 2255 * we're using a single buffer that is a multiple of the sector size. 2256 */ 2257 if ((double_buf_needed != 0) 2258 && (data->sg_count > 1) 2259 && (write_dev != 0)) { 2260 uint32_t cur_offset; 2261 int i; 2262 2263 if (file_dev->tmp_buf == NULL) 2264 file_dev->tmp_buf = calloc(dev->blocksize, 1); 2265 if (file_dev->tmp_buf == NULL) { 2266 buf->status = CAMDD_STATUS_ERROR; 2267 error_count++; 2268 goto bailout; 2269 } 2270 for (i = 0, cur_offset = 0; i < data->sg_count; i++) { 2271 bcopy(data->iovec[i].iov_base, 2272 &file_dev->tmp_buf[cur_offset], 2273 data->iovec[i].iov_len); 2274 cur_offset += data->iovec[i].iov_len; 2275 } 2276 db_len = cur_offset; 2277 } 2278 2279 if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) { 2280 if (write_dev == 0) { 2281 /* 2282 * XXX KDM is there any way we would need a S/G 2283 * list here? 2284 */ 2285 retval = pread(file_dev->fd, data->buf, 2286 buf->len, io_offset); 2287 } else { 2288 if (double_buf_needed != 0) { 2289 retval = pwrite(file_dev->fd, file_dev->tmp_buf, 2290 db_len, io_offset); 2291 } else if (data->sg_count == 0) { 2292 retval = pwrite(file_dev->fd, data->buf, 2293 data->fill_len, io_offset); 2294 } else { 2295 retval = pwritev(file_dev->fd, data->iovec, 2296 data->sg_count, io_offset); 2297 } 2298 } 2299 } else { 2300 if (write_dev == 0) { 2301 /* 2302 * XXX KDM is there any way we would need a S/G 2303 * list here? 
2304 */ 2305 retval = read(file_dev->fd, data->buf, buf->len); 2306 } else { 2307 if (double_buf_needed != 0) { 2308 retval = write(file_dev->fd, file_dev->tmp_buf, 2309 db_len); 2310 } else if (data->sg_count == 0) { 2311 retval = write(file_dev->fd, data->buf, 2312 data->fill_len); 2313 } else { 2314 retval = writev(file_dev->fd, data->iovec, 2315 data->sg_count); 2316 } 2317 } 2318 } 2319 2320 /* We're done, re-acquire the lock */ 2321 pthread_mutex_lock(&dev->mutex); 2322 2323 if (retval >= (ssize_t)data->fill_len) { 2324 /* 2325 * If the bytes transferred is more than the request size, 2326 * that indicates an overrun, which should only happen at 2327 * the end of a transfer if we have to round up to a sector 2328 * boundary. 2329 */ 2330 if (buf->status == CAMDD_STATUS_NONE) 2331 buf->status = CAMDD_STATUS_OK; 2332 data->resid = 0; 2333 dev->bytes_transferred += retval; 2334 } else if (retval == -1) { 2335 warn("Error %s %s", (write_dev) ? "writing to" : 2336 "reading from", file_dev->filename); 2337 2338 buf->status = CAMDD_STATUS_ERROR; 2339 data->resid = data->fill_len; 2340 error_count++; 2341 2342 if (dev->debug == 0) 2343 goto bailout; 2344 2345 if ((double_buf_needed != 0) 2346 && (write_dev != 0)) { 2347 fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju " 2348 "offset %ju\n", __func__, file_dev->fd, 2349 file_dev->tmp_buf, db_len, (uintmax_t)buf->lba, 2350 (uintmax_t)io_offset); 2351 } else if (data->sg_count == 0) { 2352 fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju " 2353 "offset %ju\n", __func__, file_dev->fd, data->buf, 2354 data->fill_len, (uintmax_t)buf->lba, 2355 (uintmax_t)io_offset); 2356 } else { 2357 int i; 2358 2359 fprintf(stderr, "%s: fd %d, len %u, lba %ju " 2360 "offset %ju\n", __func__, file_dev->fd, 2361 data->fill_len, (uintmax_t)buf->lba, 2362 (uintmax_t)io_offset); 2363 2364 for (i = 0; i < data->sg_count; i++) { 2365 fprintf(stderr, "index %d ptr %p len %zu\n", 2366 i, data->iovec[i].iov_base, 2367 data->iovec[i].iov_len); 2368 } 2369 } 2370 } else if (retval == 0) { 2371 buf->status = CAMDD_STATUS_EOF; 2372 if (dev->debug != 0) 2373 printf("%s: got EOF from %s!\n", __func__, 2374 file_dev->filename); 2375 data->resid = data->fill_len; 2376 error_count++; 2377 } else if (retval < (ssize_t)data->fill_len) { 2378 if (buf->status == CAMDD_STATUS_NONE) 2379 buf->status = CAMDD_STATUS_SHORT_IO; 2380 data->resid = data->fill_len - retval; 2381 dev->bytes_transferred += retval; 2382 } 2383 2384 bailout: 2385 if (buf != NULL) { 2386 if (buf->status == CAMDD_STATUS_EOF) { 2387 struct camdd_buf *buf2; 2388 dev->flags |= CAMDD_DEV_FLAG_EOF; 2389 STAILQ_FOREACH(buf2, &dev->run_queue, links) 2390 buf2->status = CAMDD_STATUS_EOF; 2391 } 2392 2393 camdd_complete_buf(dev, buf, &error_count); 2394 } 2395 2396 if (error_count != 0) 2397 return (-1); 2398 else if (no_resources != 0) 2399 return (1); 2400 else 2401 return (0); 2402 } 2403 2404 /* 2405 * Execute one command from the run queue. Returns 0 for success, 1 for 2406 * stop processing, and -1 for error. 
2407  */
2408 int
2409 camdd_pass_run(struct camdd_dev *dev)
2410 {
2411         struct camdd_buf *buf = NULL;
2412         struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass;
2413         struct camdd_buf_data *data;
2414         uint32_t num_blocks, sectors_used = 0;
2415         union ccb *ccb;
2416         int retval = 0, is_write = dev->write_dev;
2417         int double_buf_needed = 0;
2418 
2419         buf = STAILQ_FIRST(&dev->run_queue);
2420         if (buf == NULL) {
2421                 retval = 1;
2422                 goto bailout;
2423         }
2424 
2425         /*
2426          * If we're writing, we need to go through the source buffer list
2427          * and create an S/G list.
2428          */
2429         if (is_write != 0) {
2430                 retval = camdd_buf_sg_create(buf, /*iovec*/ 0, dev->sector_size,
2431                     &sectors_used, &double_buf_needed);
2432                 if (retval != 0) {
2433                         retval = -1;
2434                         goto bailout;
2435                 }
2436         }
2437 
2438         STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links);
2439         dev->num_run_queue--;
2440 
2441         data = &buf->buf_type_spec.data;
2442 
2443         ccb = &data->ccb;
2444         bzero(&(&ccb->ccb_h)[1],
2445             sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));
2446 
2447         /*
2448          * In almost every case the number of blocks should be the device
2449          * block size. The exception may be at the end of an I/O stream
2450          * for a partial block or at the end of a device.
2451          */
2452         if (is_write != 0)
2453                 num_blocks = sectors_used;
2454         else
2455                 num_blocks = data->fill_len / pass_dev->block_len;
2456 
2457         scsi_read_write(&ccb->csio,
2458                         /*retries*/ dev->retry_count,
2459                         /*cbfcnp*/ NULL,
2460                         /*tag_action*/ MSG_SIMPLE_Q_TAG,
2461                         /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ :
2462                                    SCSI_RW_WRITE,
2463                         /*byte2*/ 0,
2464                         /*minimum_cmd_size*/ dev->min_cmd_size,
2465                         /*lba*/ buf->lba,
2466                         /*block_count*/ num_blocks,
2467                         /*data_ptr*/ (data->sg_count != 0) ?
2468                                      (uint8_t *)data->segs : data->buf,
2469                         /*dxfer_len*/ (num_blocks * pass_dev->block_len),
2470                         /*sense_len*/ SSD_FULL_SIZE,
2471                         /*timeout*/ dev->io_timeout);
2472 
2473         /* Disable freezing the device queue */
2474         ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;
2475 
2476         if (dev->retry_count != 0)
2477                 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER;
2478 
2479         if (data->sg_count != 0) {
2480                 ccb->csio.sglist_cnt = data->sg_count;
2481                 ccb->ccb_h.flags |= CAM_DATA_SG;
2482         }
2483 
2484         /*
2485          * Store a pointer to the buffer in the CCB. The kernel will
2486          * restore this when we get it back, and we'll use it to identify
2487          * the buffer this CCB came from.
2488          */
2489         ccb->ccb_h.ccb_buf = buf;
2490 
2491         /*
2492          * Unlock our mutex in preparation for issuing the ioctl.
2493          */
2494         pthread_mutex_unlock(&dev->mutex);
2495         /*
2496          * Queue the CCB to the pass(4) driver.
2497 */ 2498 if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) { 2499 pthread_mutex_lock(&dev->mutex); 2500 2501 warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__, 2502 pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); 2503 warn("%s: CCB address is %p", __func__, ccb); 2504 retval = -1; 2505 2506 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2507 } else { 2508 pthread_mutex_lock(&dev->mutex); 2509 2510 dev->cur_active_io++; 2511 STAILQ_INSERT_TAIL(&dev->active_queue, buf, links); 2512 } 2513 2514 bailout: 2515 return (retval); 2516 } 2517 2518 int 2519 camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len) 2520 { 2521 struct camdd_dev_pass *pass_dev; 2522 uint32_t num_blocks; 2523 int retval = 0; 2524 2525 pass_dev = &dev->dev_spec.pass; 2526 2527 *lba = dev->next_io_pos_bytes / dev->sector_size; 2528 *len = dev->blocksize; 2529 num_blocks = *len / dev->sector_size; 2530 2531 /* 2532 * If max_sector is 0, then we have no set limit. This can happen 2533 * if we're writing to a file in a filesystem, or reading from 2534 * something like /dev/zero. 2535 */ 2536 if ((dev->max_sector != 0) 2537 || (dev->sector_io_limit != 0)) { 2538 uint64_t max_sector; 2539 2540 if ((dev->max_sector != 0) 2541 && (dev->sector_io_limit != 0)) 2542 max_sector = min(dev->sector_io_limit, dev->max_sector); 2543 else if (dev->max_sector != 0) 2544 max_sector = dev->max_sector; 2545 else 2546 max_sector = dev->sector_io_limit; 2547 2548 2549 /* 2550 * Check to see whether we're starting off past the end of 2551 * the device. If so, we need to just send an EOF 2552 * notification to the writer. 2553 */ 2554 if (*lba > max_sector) { 2555 *len = 0; 2556 retval = 1; 2557 } else if (((*lba + num_blocks) > max_sector + 1) 2558 || ((*lba + num_blocks) < *lba)) { 2559 /* 2560 * If we get here (but pass the first check), we 2561 * can trim the request length down to go to the 2562 * end of the device. 2563 */ 2564 num_blocks = (max_sector + 1) - *lba; 2565 *len = num_blocks * dev->sector_size; 2566 retval = 1; 2567 } 2568 } 2569 2570 dev->next_io_pos_bytes += *len; 2571 2572 return (retval); 2573 } 2574 2575 /* 2576 * Returns 0 for success, 1 for EOF detected, and -1 for failure. 2577 */ 2578 int 2579 camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf) 2580 { 2581 struct camdd_buf *buf = NULL; 2582 struct camdd_buf_data *data; 2583 struct camdd_dev_pass *pass_dev; 2584 size_t new_len; 2585 struct camdd_buf_data *rb_data; 2586 int is_write = dev->write_dev; 2587 int eof_flush_needed = 0; 2588 int retval = 0; 2589 int error; 2590 2591 pass_dev = &dev->dev_spec.pass; 2592 2593 /* 2594 * If we've gotten EOF or our partner has, we should not continue 2595 * queueing I/O. If we're a writer, though, we should continue 2596 * to write any buffers that don't have EOF status. 2597 */ 2598 if ((dev->flags & CAMDD_DEV_FLAG_EOF) 2599 || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF) 2600 && (is_write == 0))) { 2601 /* 2602 * Tell the worker thread that we have seen EOF. 2603 */ 2604 retval = 1; 2605 2606 /* 2607 * If we're the writer, send the buffer back with EOF status. 
2608 */ 2609 if (is_write) { 2610 read_buf->status = CAMDD_STATUS_EOF; 2611 2612 error = camdd_complete_peer_buf(dev, read_buf); 2613 } 2614 goto bailout; 2615 } 2616 2617 if (is_write == 0) { 2618 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2619 if (buf == NULL) { 2620 retval = -1; 2621 goto bailout; 2622 } 2623 data = &buf->buf_type_spec.data; 2624 2625 retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len); 2626 if (retval != 0) { 2627 buf->status = CAMDD_STATUS_EOF; 2628 2629 if ((buf->len == 0) 2630 && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT | 2631 CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) { 2632 camdd_release_buf(buf); 2633 goto bailout; 2634 } 2635 dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED; 2636 } 2637 2638 data->fill_len = buf->len; 2639 data->src_start_offset = buf->lba * dev->sector_size; 2640 2641 /* 2642 * Put this on the run queue. 2643 */ 2644 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2645 dev->num_run_queue++; 2646 2647 /* We're done. */ 2648 goto bailout; 2649 } 2650 2651 /* 2652 * Check for new EOF status from the reader. 2653 */ 2654 if ((read_buf->status == CAMDD_STATUS_EOF) 2655 || (read_buf->status == CAMDD_STATUS_ERROR)) { 2656 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2657 if ((STAILQ_FIRST(&dev->pending_queue) == NULL) 2658 && (read_buf->len == 0)) { 2659 camdd_complete_peer_buf(dev, read_buf); 2660 retval = 1; 2661 goto bailout; 2662 } else 2663 eof_flush_needed = 1; 2664 } 2665 2666 /* 2667 * See if we have a buffer we're composing with pieces from our 2668 * partner thread. 2669 */ 2670 buf = STAILQ_FIRST(&dev->pending_queue); 2671 if (buf == NULL) { 2672 uint64_t lba; 2673 ssize_t len; 2674 2675 retval = camdd_get_next_lba_len(dev, &lba, &len); 2676 if (retval != 0) { 2677 read_buf->status = CAMDD_STATUS_EOF; 2678 2679 if (len == 0) { 2680 dev->flags |= CAMDD_DEV_FLAG_EOF; 2681 error = camdd_complete_peer_buf(dev, read_buf); 2682 goto bailout; 2683 } 2684 } 2685 2686 /* 2687 * If we don't have a pending buffer, we need to grab a new 2688 * one from the free list or allocate another one. 2689 */ 2690 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2691 if (buf == NULL) { 2692 retval = 1; 2693 goto bailout; 2694 } 2695 2696 buf->lba = lba; 2697 buf->len = len; 2698 2699 STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links); 2700 dev->num_pending_queue++; 2701 } 2702 2703 data = &buf->buf_type_spec.data; 2704 2705 rb_data = &read_buf->buf_type_spec.data; 2706 2707 if ((rb_data->src_start_offset != dev->next_peer_pos_bytes) 2708 && (dev->debug != 0)) { 2709 printf("%s: WARNING: reader offset %#jx != expected offset " 2710 "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset, 2711 (uintmax_t)dev->next_peer_pos_bytes); 2712 } 2713 dev->next_peer_pos_bytes = rb_data->src_start_offset + 2714 (rb_data->fill_len - rb_data->resid); 2715 2716 new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len; 2717 if (new_len < buf->len) { 2718 /* 2719 * There are three cases here: 2720 * 1. We need more data to fill up a block, so we put 2721 * this I/O on the queue and wait for more I/O. 2722 * 2. We have a pending buffer in the queue that is 2723 * smaller than our blocksize, but we got an EOF. So we 2724 * need to go ahead and flush the write out. 2725 * 3. We got an error. 2726 */ 2727 2728 /* 2729 * Increment our fill length. 2730 */ 2731 data->fill_len += (rb_data->fill_len - rb_data->resid); 2732 2733 /* 2734 * Add the new read buffer to the list for writing. 
2735 */ 2736 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2737 2738 /* Increment the count */ 2739 buf->src_count++; 2740 2741 if (eof_flush_needed == 0) { 2742 /* 2743 * We need to exit, because we don't have enough 2744 * data yet. 2745 */ 2746 goto bailout; 2747 } else { 2748 /* 2749 * Take the buffer off of the pending queue. 2750 */ 2751 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2752 links); 2753 dev->num_pending_queue--; 2754 2755 /* 2756 * If we need an EOF flush, but there is no data 2757 * to flush, go ahead and return this buffer. 2758 */ 2759 if (data->fill_len == 0) { 2760 camdd_complete_buf(dev, buf, /*error_count*/0); 2761 retval = 1; 2762 goto bailout; 2763 } 2764 2765 /* 2766 * Put this on the next queue for execution. 2767 */ 2768 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2769 dev->num_run_queue++; 2770 } 2771 } else if (new_len == buf->len) { 2772 /* 2773 * We have enough data to completey fill one block, 2774 * so we're ready to issue the I/O. 2775 */ 2776 2777 /* 2778 * Take the buffer off of the pending queue. 2779 */ 2780 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); 2781 dev->num_pending_queue--; 2782 2783 /* 2784 * Add the new read buffer to the list for writing. 2785 */ 2786 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2787 2788 /* Increment the count */ 2789 buf->src_count++; 2790 2791 /* 2792 * Increment our fill length. 2793 */ 2794 data->fill_len += (rb_data->fill_len - rb_data->resid); 2795 2796 /* 2797 * Put this on the next queue for execution. 2798 */ 2799 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2800 dev->num_run_queue++; 2801 } else { 2802 struct camdd_buf *idb; 2803 struct camdd_buf_indirect *indirect; 2804 uint32_t len_to_go, cur_offset; 2805 2806 2807 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2808 if (idb == NULL) { 2809 retval = 1; 2810 goto bailout; 2811 } 2812 indirect = &idb->buf_type_spec.indirect; 2813 indirect->src_buf = read_buf; 2814 read_buf->refcount++; 2815 indirect->offset = 0; 2816 indirect->start_ptr = rb_data->buf; 2817 /* 2818 * We've already established that there is more 2819 * data in read_buf than we have room for in our 2820 * current write request. So this particular chunk 2821 * of the request should just be the remainder 2822 * needed to fill up a block. 2823 */ 2824 indirect->len = buf->len - (data->fill_len - data->resid); 2825 2826 camdd_buf_add_child(buf, idb); 2827 2828 /* 2829 * This buffer is ready to execute, so we can take 2830 * it off the pending queue and put it on the run 2831 * queue. 2832 */ 2833 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2834 links); 2835 dev->num_pending_queue--; 2836 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2837 dev->num_run_queue++; 2838 2839 cur_offset = indirect->offset + indirect->len; 2840 2841 /* 2842 * The resulting I/O would be too large to fit in 2843 * one block. We need to split this I/O into 2844 * multiple pieces. Allocate as many buffers as needed. 2845 */ 2846 for (len_to_go = rb_data->fill_len - rb_data->resid - 2847 indirect->len; len_to_go > 0;) { 2848 struct camdd_buf *new_buf; 2849 struct camdd_buf_data *new_data; 2850 uint64_t lba; 2851 ssize_t len; 2852 2853 retval = camdd_get_next_lba_len(dev, &lba, &len); 2854 if ((retval != 0) 2855 && (len == 0)) { 2856 /* 2857 * The device has already been marked 2858 * as EOF, and there is no space left. 
2859 */ 2860 goto bailout; 2861 } 2862 2863 new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2864 if (new_buf == NULL) { 2865 retval = 1; 2866 goto bailout; 2867 } 2868 2869 new_buf->lba = lba; 2870 new_buf->len = len; 2871 2872 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2873 if (idb == NULL) { 2874 retval = 1; 2875 goto bailout; 2876 } 2877 2878 indirect = &idb->buf_type_spec.indirect; 2879 2880 indirect->src_buf = read_buf; 2881 read_buf->refcount++; 2882 indirect->offset = cur_offset; 2883 indirect->start_ptr = rb_data->buf + cur_offset; 2884 indirect->len = min(len_to_go, new_buf->len); 2885 #if 0 2886 if (((indirect->len % dev->sector_size) != 0) 2887 || ((indirect->offset % dev->sector_size) != 0)) { 2888 warnx("offset %ju len %ju not aligned with " 2889 "sector size %u", indirect->offset, 2890 (uintmax_t)indirect->len, dev->sector_size); 2891 } 2892 #endif 2893 cur_offset += indirect->len; 2894 len_to_go -= indirect->len; 2895 2896 camdd_buf_add_child(new_buf, idb); 2897 2898 new_data = &new_buf->buf_type_spec.data; 2899 2900 if ((new_data->fill_len == new_buf->len) 2901 || (eof_flush_needed != 0)) { 2902 STAILQ_INSERT_TAIL(&dev->run_queue, 2903 new_buf, links); 2904 dev->num_run_queue++; 2905 } else if (new_data->fill_len < buf->len) { 2906 STAILQ_INSERT_TAIL(&dev->pending_queue, 2907 new_buf, links); 2908 dev->num_pending_queue++; 2909 } else { 2910 warnx("%s: too much data in new " 2911 "buffer!", __func__); 2912 retval = 1; 2913 goto bailout; 2914 } 2915 } 2916 } 2917 2918 bailout: 2919 return (retval); 2920 } 2921 2922 void 2923 camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 2924 uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes) 2925 { 2926 *our_depth = dev->cur_active_io + dev->num_run_queue; 2927 if (dev->num_peer_work_queue > 2928 dev->num_peer_done_queue) 2929 *peer_depth = dev->num_peer_work_queue - 2930 dev->num_peer_done_queue; 2931 else 2932 *peer_depth = 0; 2933 *our_bytes = *our_depth * dev->blocksize; 2934 *peer_bytes = dev->peer_bytes_queued; 2935 } 2936 2937 void 2938 camdd_sig_handler(int sig) 2939 { 2940 if (sig == SIGINFO) 2941 need_status = 1; 2942 else { 2943 need_exit = 1; 2944 error_exit = 1; 2945 } 2946 2947 sem_post(&camdd_sem); 2948 } 2949 2950 void 2951 camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, 2952 struct timespec *start_time) 2953 { 2954 struct timespec done_time; 2955 uint64_t total_ns; 2956 long double mb_sec, total_sec; 2957 int error = 0; 2958 2959 error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time); 2960 if (error != 0) { 2961 warn("Unable to get done time"); 2962 return; 2963 } 2964 2965 timespecsub(&done_time, start_time); 2966 2967 total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000); 2968 total_sec = total_ns; 2969 total_sec /= 1000000000; 2970 2971 fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n" 2972 "%.4Lf seconds elapsed\n", 2973 (uintmax_t)camdd_dev->bytes_transferred, 2974 (camdd_dev->write_dev == 0) ? "read from" : "written to", 2975 camdd_dev->device_name, 2976 (uintmax_t)other_dev->bytes_transferred, 2977 (other_dev->write_dev == 0) ? 
"read from" : "written to", 2978 other_dev->device_name, total_sec); 2979 2980 mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred); 2981 mb_sec /= 1024 * 1024; 2982 mb_sec *= 1000000000; 2983 mb_sec /= total_ns; 2984 fprintf(stderr, "%.2Lf MB/sec\n", mb_sec); 2985 } 2986 2987 int 2988 camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, 2989 int retry_count, int timeout) 2990 { 2991 char *device = NULL; 2992 struct cam_device *new_cam_dev = NULL; 2993 struct camdd_dev *devs[2]; 2994 struct timespec start_time; 2995 pthread_t threads[2]; 2996 int unit = 0; 2997 int error = 0; 2998 int i; 2999 3000 if (num_io_opts != 2) { 3001 warnx("Must have one input and one output path"); 3002 error = 1; 3003 goto bailout; 3004 } 3005 3006 bzero(devs, sizeof(devs)); 3007 3008 for (i = 0; i < num_io_opts; i++) { 3009 switch (io_opts[i].dev_type) { 3010 case CAMDD_DEV_PASS: { 3011 camdd_argmask new_arglist = CAMDD_ARG_NONE; 3012 int bus = 0, target = 0, lun = 0; 3013 char name[30]; 3014 int rv; 3015 3016 if (isdigit(io_opts[i].dev_name[0])) { 3017 /* device specified as bus:target[:lun] */ 3018 rv = parse_btl(io_opts[i].dev_name, &bus, 3019 &target, &lun, &new_arglist); 3020 if (rv < 2) { 3021 warnx("numeric device specification " 3022 "must be either bus:target, or " 3023 "bus:target:lun"); 3024 error = 1; 3025 goto bailout; 3026 } 3027 /* default to 0 if lun was not specified */ 3028 if ((new_arglist & CAMDD_ARG_LUN) == 0) { 3029 lun = 0; 3030 new_arglist |= CAMDD_ARG_LUN; 3031 } 3032 } else { 3033 if (cam_get_device(io_opts[i].dev_name, name, 3034 sizeof name, &unit) == -1) { 3035 warnx("%s", cam_errbuf); 3036 error = 1; 3037 goto bailout; 3038 } 3039 device = strdup(name); 3040 new_arglist |= CAMDD_ARG_DEVICE |CAMDD_ARG_UNIT; 3041 } 3042 3043 if (new_arglist & (CAMDD_ARG_BUS | CAMDD_ARG_TARGET)) 3044 new_cam_dev = cam_open_btl(bus, target, lun, 3045 O_RDWR, NULL); 3046 else 3047 new_cam_dev = cam_open_spec_device(device, unit, 3048 O_RDWR, NULL); 3049 if (new_cam_dev == NULL) { 3050 warnx("%s", cam_errbuf); 3051 error = 1; 3052 goto bailout; 3053 } 3054 3055 devs[i] = camdd_probe_pass(new_cam_dev, 3056 /*io_opts*/ &io_opts[i], 3057 CAMDD_ARG_ERR_RECOVER, 3058 /*probe_retry_count*/ 3, 3059 /*probe_timeout*/ 5000, 3060 /*io_retry_count*/ retry_count, 3061 /*io_timeout*/ timeout); 3062 if (devs[i] == NULL) { 3063 warn("Unable to probe device %s%u", 3064 new_cam_dev->device_name, 3065 new_cam_dev->dev_unit_num); 3066 error = 1; 3067 goto bailout; 3068 } 3069 break; 3070 } 3071 case CAMDD_DEV_FILE: { 3072 int fd = -1; 3073 3074 if (io_opts[i].dev_name[0] == '-') { 3075 if (io_opts[i].write_dev != 0) 3076 fd = STDOUT_FILENO; 3077 else 3078 fd = STDIN_FILENO; 3079 } else { 3080 if (io_opts[i].write_dev != 0) { 3081 fd = open(io_opts[i].dev_name, 3082 O_RDWR | O_CREAT, S_IWUSR |S_IRUSR); 3083 } else { 3084 fd = open(io_opts[i].dev_name, 3085 O_RDONLY); 3086 } 3087 } 3088 if (fd == -1) { 3089 warn("error opening file %s", 3090 io_opts[i].dev_name); 3091 error = 1; 3092 goto bailout; 3093 } 3094 3095 devs[i] = camdd_probe_file(fd, &io_opts[i], 3096 retry_count, timeout); 3097 if (devs[i] == NULL) { 3098 error = 1; 3099 goto bailout; 3100 } 3101 3102 break; 3103 } 3104 default: 3105 warnx("Unknown device type %d (%s)", 3106 io_opts[i].dev_type, io_opts[i].dev_name); 3107 error = 1; 3108 goto bailout; 3109 break; /*NOTREACHED */ 3110 } 3111 3112 devs[i]->write_dev = io_opts[i].write_dev; 3113 3114 devs[i]->start_offset_bytes = io_opts[i].offset; 3115 3116 if (max_io != 0) { 
3117                         devs[i]->sector_io_limit =
3118                             (devs[i]->start_offset_bytes /
3119                             devs[i]->sector_size) +
3120                             (max_io / devs[i]->sector_size) - 1;
3125                 }
3126 
3127                 devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes;
3128                 devs[i]->next_completion_pos_bytes = devs[i]->start_offset_bytes;
3129         }
3130 
3131         devs[0]->peer_dev = devs[1];
3132         devs[1]->peer_dev = devs[0];
3133         devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes;
3134         devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes;
3135 
3136         sem_init(&camdd_sem, /*pshared*/ 0, 0);
3137 
3138         signal(SIGINFO, camdd_sig_handler);
3139         signal(SIGINT, camdd_sig_handler);
3140 
3141         error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time);
3142         if (error != 0) {
3143                 warn("Unable to get start time");
3144                 goto bailout;
3145         }
3146 
3147         for (i = 0; i < num_io_opts; i++) {
3148                 error = pthread_create(&threads[i], NULL, camdd_worker,
3149                                        (void *)devs[i]);
3150                 if (error != 0) {
3151                         warnc(error, "pthread_create() failed");
3152                         goto bailout;
3153                 }
3154         }
3155 
3156         for (;;) {
3157                 if ((sem_wait(&camdd_sem) == -1)
3158                  || (need_exit != 0)) {
3159                         struct kevent ke;
3160 
3161                         for (i = 0; i < num_io_opts; i++) {
3162                                 EV_SET(&ke, (uintptr_t)&devs[i]->work_queue,
3163                                     EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
3164 
3165                                 devs[i]->flags |= CAMDD_DEV_FLAG_EOF;
3166 
3167                                 error = kevent(devs[i]->kq, &ke, 1, NULL, 0,
3168                                     NULL);
3169                                 if (error == -1)
3170                                         warn("%s: unable to wake up thread",
3171                                             __func__);
3172                                 error = 0;
3173                         }
3174                         break;
3175                 } else if (need_status != 0) {
3176                         camdd_print_status(devs[0], devs[1], &start_time);
3177                         need_status = 0;
3178                 }
3179         }
3180         for (i = 0; i < num_io_opts; i++) {
3181                 pthread_join(threads[i], NULL);
3182         }
3183 
3184         camdd_print_status(devs[0], devs[1], &start_time);
3185 
3186 bailout:
3187 
3188         for (i = 0; i < num_io_opts; i++)
3189                 camdd_free_dev(devs[i]);
3190 
3191         return (error + error_exit);
3192 }
3193 
3194 void
3195 usage(void)
3196 {
3197         fprintf(stderr,
3198 "usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n"
3199 "             <-i|-o file=/tmp/file,bs=512K,offset=1M>\n"
3200 "             <-i|-o file=/dev/da0,bs=512K,offset=1M>\n"
3201 "             <-i|-o file=/dev/nsa0,bs=512K>\n"
3202 "             [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n"
3203 "Option description\n"
3204 "-i <arg=val>  Specify input device/file and parameters\n"
3205 "-o <arg=val>  Specify output device/file and parameters\n"
3206 "Input and Output parameters\n"
3207 "pass=name     Specify a pass(4) device like pass0 or /dev/pass0\n"
3208 "file=name     Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n"
3209 "              or - for stdin/stdout\n"
3210 "bs=blocksize  Specify blocksize in bytes, or using K, M, G, etc. suffix\n"
3211 "offset=len    Specify starting offset in bytes or using K, M, G suffix\n"
3212 "              NOTE: offset cannot be specified on tapes, pipes, stdin/out\n"
3213 "depth=N       Specify a numeric queue depth. This only applies to pass(4)\n"
3214 "mcs=N         Specify a minimum cmd size for pass(4) read/write commands\n"
3215 "Optional arguments\n"
3216 "-C retry_cnt  Specify a retry count for pass(4) devices\n"
3217 "-E            Enable CAM error recovery for pass(4) devices\n"
3218 "-m max_io     Specify the maximum amount to be transferred in bytes or\n"
3219 "              using K, G, M, etc.
suffixes\n" 3220 "-t timeout Specify the I/O timeout to use with pass(4) devices\n" 3221 "-v Enable verbose error recovery\n" 3222 "-h Print this message\n"); 3223 } 3224 3225 3226 int 3227 camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts) 3228 { 3229 char *tmpstr, *tmpstr2; 3230 char *orig_tmpstr = NULL; 3231 int retval = 0; 3232 3233 io_opts->write_dev = is_write; 3234 3235 tmpstr = strdup(args); 3236 if (tmpstr == NULL) { 3237 warn("strdup failed"); 3238 retval = 1; 3239 goto bailout; 3240 } 3241 orig_tmpstr = tmpstr; 3242 while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) { 3243 char *name, *value; 3244 3245 /* 3246 * If the user creates an empty parameter by putting in two 3247 * commas, skip over it and look for the next field. 3248 */ 3249 if (*tmpstr2 == '\0') 3250 continue; 3251 3252 name = strsep(&tmpstr2, "="); 3253 if (*name == '\0') { 3254 warnx("Got empty I/O parameter name"); 3255 retval = 1; 3256 goto bailout; 3257 } 3258 value = strsep(&tmpstr2, "="); 3259 if ((value == NULL) 3260 || (*value == '\0')) { 3261 warnx("Empty I/O parameter value for %s", name); 3262 retval = 1; 3263 goto bailout; 3264 } 3265 if (strncasecmp(name, "file", 4) == 0) { 3266 io_opts->dev_type = CAMDD_DEV_FILE; 3267 io_opts->dev_name = strdup(value); 3268 if (io_opts->dev_name == NULL) { 3269 warn("Error allocating memory"); 3270 retval = 1; 3271 goto bailout; 3272 } 3273 } else if (strncasecmp(name, "pass", 4) == 0) { 3274 io_opts->dev_type = CAMDD_DEV_PASS; 3275 io_opts->dev_name = strdup(value); 3276 if (io_opts->dev_name == NULL) { 3277 warn("Error allocating memory"); 3278 retval = 1; 3279 goto bailout; 3280 } 3281 } else if ((strncasecmp(name, "bs", 2) == 0) 3282 || (strncasecmp(name, "blocksize", 9) == 0)) { 3283 retval = expand_number(value, &io_opts->blocksize); 3284 if (retval == -1) { 3285 warn("expand_number(3) failed on %s=%s", name, 3286 value); 3287 retval = 1; 3288 goto bailout; 3289 } 3290 } else if (strncasecmp(name, "depth", 5) == 0) { 3291 char *endptr; 3292 3293 io_opts->queue_depth = strtoull(value, &endptr, 0); 3294 if (*endptr != '\0') { 3295 warnx("invalid queue depth %s", value); 3296 retval = 1; 3297 goto bailout; 3298 } 3299 } else if (strncasecmp(name, "mcs", 3) == 0) { 3300 char *endptr; 3301 3302 io_opts->min_cmd_size = strtol(value, &endptr, 0); 3303 if ((*endptr != '\0') 3304 || ((io_opts->min_cmd_size > 16) 3305 || (io_opts->min_cmd_size < 0))) { 3306 warnx("invalid minimum cmd size %s", value); 3307 retval = 1; 3308 goto bailout; 3309 } 3310 } else if (strncasecmp(name, "offset", 6) == 0) { 3311 retval = expand_number(value, &io_opts->offset); 3312 if (retval == -1) { 3313 warn("expand_number(3) failed on %s=%s", name, 3314 value); 3315 retval = 1; 3316 goto bailout; 3317 } 3318 } else if (strncasecmp(name, "debug", 5) == 0) { 3319 char *endptr; 3320 3321 io_opts->debug = strtoull(value, &endptr, 0); 3322 if (*endptr != '\0') { 3323 warnx("invalid debug level %s", value); 3324 retval = 1; 3325 goto bailout; 3326 } 3327 } else { 3328 warnx("Unrecognized parameter %s=%s", name, value); 3329 } 3330 } 3331 bailout: 3332 free(orig_tmpstr); 3333 3334 return (retval); 3335 } 3336 3337 int 3338 main(int argc, char **argv) 3339 { 3340 int c; 3341 camdd_argmask arglist = CAMDD_ARG_NONE; 3342 int timeout = 0, retry_count = 1; 3343 int error = 0; 3344 uint64_t max_io = 0; 3345 struct camdd_io_opts *opt_list = NULL; 3346 3347 if (argc == 1) { 3348 usage(); 3349 exit(1); 3350 } 3351 3352 opt_list = calloc(2, sizeof(struct camdd_io_opts)); 3353 if 
(opt_list == NULL) { 3354 warn("Unable to allocate option list"); 3355 error = 1; 3356 goto bailout; 3357 } 3358 3359 while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){ 3360 switch (c) { 3361 case 'C': 3362 retry_count = strtol(optarg, NULL, 0); 3363 if (retry_count < 0) 3364 errx(1, "retry count %d is < 0", 3365 retry_count); 3366 arglist |= CAMDD_ARG_RETRIES; 3367 break; 3368 case 'E': 3369 arglist |= CAMDD_ARG_ERR_RECOVER; 3370 break; 3371 case 'i': 3372 case 'o': 3373 if (((c == 'i') 3374 && (opt_list[0].dev_type != CAMDD_DEV_NONE)) 3375 || ((c == 'o') 3376 && (opt_list[1].dev_type != CAMDD_DEV_NONE))) { 3377 errx(1, "Only one input and output path " 3378 "allowed"); 3379 } 3380 error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0, 3381 (c == 'o') ? &opt_list[1] : &opt_list[0]); 3382 if (error != 0) 3383 goto bailout; 3384 break; 3385 case 'm': 3386 error = expand_number(optarg, &max_io); 3387 if (error == -1) { 3388 warn("invalid maximum I/O amount %s", optarg); 3389 error = 1; 3390 goto bailout; 3391 } 3392 break; 3393 case 't': 3394 timeout = strtol(optarg, NULL, 0); 3395 if (timeout < 0) 3396 errx(1, "invalid timeout %d", timeout); 3397 /* Convert the timeout from seconds to ms */ 3398 timeout *= 1000; 3399 arglist |= CAMDD_ARG_TIMEOUT; 3400 break; 3401 case 'v': 3402 arglist |= CAMDD_ARG_VERBOSE; 3403 break; 3404 case 'h': 3405 default: 3406 usage(); 3407 exit(1); 3408 break; /*NOTREACHED*/ 3409 } 3410 } 3411 3412 if ((opt_list[0].dev_type == CAMDD_DEV_NONE) 3413 || (opt_list[1].dev_type == CAMDD_DEV_NONE)) 3414 errx(1, "Must specify both -i and -o"); 3415 3416 /* 3417 * Set the timeout if the user hasn't specified one. 3418 */ 3419 if (timeout == 0) 3420 timeout = CAMDD_PASS_RW_TIMEOUT; 3421 3422 error = camdd_rw(opt_list, 2, max_io, retry_count, timeout); 3423 3424 bailout: 3425 free(opt_list); 3426 3427 exit(error); 3428 } 3429
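/*
 * Example invocations (illustrative; they follow the option syntax printed
 * by usage() above, and the device and file names used here, pass0,
 * /dev/nsa0 and /tmp/file, are placeholders taken from that text):
 *
 * Read from a pass(4) device into a file, 1MB blocks, queue depth 4 on the
 * pass(4) side:
 *
 *     camdd -i pass=pass0,bs=1M,depth=4 -o file=/tmp/file,bs=1M
 *
 * Write that file out to a tape drive, capping the total transfer at 10G:
 *
 *     camdd -i file=/tmp/file,bs=512K -o file=/dev/nsa0,bs=512K -m 10G
 *
 * The fragment below is likewise an illustrative sketch condensing the
 * asynchronous pass(4) pattern that camdd_pass_run() and camdd_pass_fetch()
 * implement above: build a CCB, hand it to the kernel with CAMIOQUEUE, and
 * reap the completed CCB with CAMIOGET. It assumes fd is an open pass(4)
 * file descriptor, ccb points to a CCB whose header has already been
 * initialized for that device, and lba, num_blocks, block_len, min_cmd_size,
 * io_timeout and data_buf have been set up by the caller.
 */
#if 0
        /* Build the READ CDB in the CCB, as camdd_pass_run() does. */
        scsi_read_write(&ccb->csio,
                        /*retries*/ 1,
                        /*cbfcnp*/ NULL,
                        /*tag_action*/ MSG_SIMPLE_Q_TAG,
                        /*readop*/ SCSI_RW_READ,
                        /*byte2*/ 0,
                        /*minimum_cmd_size*/ min_cmd_size,
                        /*lba*/ lba,
                        /*block_count*/ num_blocks,
                        /*data_ptr*/ data_buf,
                        /*dxfer_len*/ num_blocks * block_len,
                        /*sense_len*/ SSD_FULL_SIZE,
                        /*timeout*/ io_timeout);

        /* Don't freeze the device queue if this command fails. */
        ccb->ccb_h.flags |= CAM_DEV_QFRZDIS;

        /* Queue the CCB; it completes asynchronously. */
        if (ioctl(fd, CAMIOQUEUE, ccb) == -1)
                err(1, "CAMIOQUEUE");

        /*
         * Later, e.g. once kevent() reports the descriptor ready, reap a
         * completed CCB; it comes back with status, residual and sense data.
         */
        if (ioctl(fd, CAMIOGET, ccb) == -1)
                err(1, "CAMIOGET");
#endif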