/*-
 * Copyright (c) 1997-2007 Kenneth D. Merry
 * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Ken Merry (Spectra Logic Corporation)
 */

/*
 * This is eventually intended to be:
 * - A basic data transfer/copy utility
 * - A simple benchmark utility
 * - An example of how to use the asynchronous pass(4) driver interface.
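 *
 * As a rough illustration of the copy-utility use (hypothetical device and
 * file names; the authoritative option syntax is in usage() and
 * camdd_parse_io_opts() below), a pass(4)-to-file copy might look like:
 *
 *	camdd -i pass=pass0,bs=1M,depth=4 -o file=/tmp/pass0.img
 *
 * where bs= sets the per-I/O blocksize and depth= the queue depth used for
 * that side of the transfer.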
39 */ 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/ioctl.h> 44 #include <sys/stdint.h> 45 #include <sys/types.h> 46 #include <sys/endian.h> 47 #include <sys/param.h> 48 #include <sys/sbuf.h> 49 #include <sys/stat.h> 50 #include <sys/event.h> 51 #include <sys/time.h> 52 #include <sys/uio.h> 53 #include <vm/vm.h> 54 #include <machine/bus.h> 55 #include <sys/bus.h> 56 #include <sys/bus_dma.h> 57 #include <sys/mtio.h> 58 #include <sys/conf.h> 59 #include <sys/disk.h> 60 61 #include <stdio.h> 62 #include <stdlib.h> 63 #include <semaphore.h> 64 #include <string.h> 65 #include <unistd.h> 66 #include <inttypes.h> 67 #include <limits.h> 68 #include <fcntl.h> 69 #include <ctype.h> 70 #include <err.h> 71 #include <libutil.h> 72 #include <pthread.h> 73 #include <assert.h> 74 #include <bsdxml.h> 75 76 #include <cam/cam.h> 77 #include <cam/cam_debug.h> 78 #include <cam/cam_ccb.h> 79 #include <cam/scsi/scsi_all.h> 80 #include <cam/scsi/scsi_da.h> 81 #include <cam/scsi/scsi_pass.h> 82 #include <cam/scsi/scsi_message.h> 83 #include <cam/scsi/smp_all.h> 84 #include <camlib.h> 85 #include <mtlib.h> 86 #include <zlib.h> 87 88 typedef enum { 89 CAMDD_CMD_NONE = 0x00000000, 90 CAMDD_CMD_HELP = 0x00000001, 91 CAMDD_CMD_WRITE = 0x00000002, 92 CAMDD_CMD_READ = 0x00000003 93 } camdd_cmdmask; 94 95 typedef enum { 96 CAMDD_ARG_NONE = 0x00000000, 97 CAMDD_ARG_VERBOSE = 0x00000001, 98 CAMDD_ARG_DEVICE = 0x00000002, 99 CAMDD_ARG_BUS = 0x00000004, 100 CAMDD_ARG_TARGET = 0x00000008, 101 CAMDD_ARG_LUN = 0x00000010, 102 CAMDD_ARG_UNIT = 0x00000020, 103 CAMDD_ARG_TIMEOUT = 0x00000040, 104 CAMDD_ARG_ERR_RECOVER = 0x00000080, 105 CAMDD_ARG_RETRIES = 0x00000100 106 } camdd_argmask; 107 108 typedef enum { 109 CAMDD_DEV_NONE = 0x00, 110 CAMDD_DEV_PASS = 0x01, 111 CAMDD_DEV_FILE = 0x02 112 } camdd_dev_type; 113 114 struct camdd_io_opts { 115 camdd_dev_type dev_type; 116 char *dev_name; 117 uint64_t blocksize; 118 uint64_t queue_depth; 119 uint64_t offset; 120 int min_cmd_size; 121 int write_dev; 122 uint64_t debug; 123 }; 124 125 typedef enum { 126 CAMDD_BUF_NONE, 127 CAMDD_BUF_DATA, 128 CAMDD_BUF_INDIRECT 129 } camdd_buf_type; 130 131 struct camdd_buf_indirect { 132 /* 133 * Pointer to the source buffer. 134 */ 135 struct camdd_buf *src_buf; 136 137 /* 138 * Offset into the source buffer, in bytes. 139 */ 140 uint64_t offset; 141 /* 142 * Pointer to the starting point in the source buffer. 143 */ 144 uint8_t *start_ptr; 145 146 /* 147 * Length of this chunk in bytes. 148 */ 149 size_t len; 150 }; 151 152 struct camdd_buf_data { 153 /* 154 * Buffer allocated when we allocate this camdd_buf. This should 155 * be the size of the blocksize for this device. 156 */ 157 uint8_t *buf; 158 159 /* 160 * The amount of backing store allocated in buf. Generally this 161 * will be the blocksize of the device. 162 */ 163 uint32_t alloc_len; 164 165 /* 166 * The amount of data that was put into the buffer (on reads) or 167 * the amount of data we have put onto the src_list so far (on 168 * writes). 169 */ 170 uint32_t fill_len; 171 172 /* 173 * The amount of data that was not transferred. 174 */ 175 uint32_t resid; 176 177 /* 178 * Starting byte offset on the reader. 179 */ 180 uint64_t src_start_offset; 181 182 /* 183 * CCB used for pass(4) device targets. 184 */ 185 union ccb ccb; 186 187 /* 188 * Number of scatter/gather segments. 189 */ 190 int sg_count; 191 192 /* 193 * Set if we had to tack on an extra buffer to round the transfer 194 * up to a sector size. 
195 */ 196 int extra_buf; 197 198 /* 199 * Scatter/gather list used generally when we're the writer for a 200 * pass(4) device. 201 */ 202 bus_dma_segment_t *segs; 203 204 /* 205 * Scatter/gather list used generally when we're the writer for a 206 * file or block device; 207 */ 208 struct iovec *iovec; 209 }; 210 211 union camdd_buf_types { 212 struct camdd_buf_indirect indirect; 213 struct camdd_buf_data data; 214 }; 215 216 typedef enum { 217 CAMDD_STATUS_NONE, 218 CAMDD_STATUS_OK, 219 CAMDD_STATUS_SHORT_IO, 220 CAMDD_STATUS_EOF, 221 CAMDD_STATUS_ERROR 222 } camdd_buf_status; 223 224 struct camdd_buf { 225 camdd_buf_type buf_type; 226 union camdd_buf_types buf_type_spec; 227 228 camdd_buf_status status; 229 230 uint64_t lba; 231 size_t len; 232 233 /* 234 * A reference count of how many indirect buffers point to this 235 * buffer. 236 */ 237 int refcount; 238 239 /* 240 * A link back to our parent device. 241 */ 242 struct camdd_dev *dev; 243 STAILQ_ENTRY(camdd_buf) links; 244 STAILQ_ENTRY(camdd_buf) work_links; 245 246 /* 247 * A count of the buffers on the src_list. 248 */ 249 int src_count; 250 251 /* 252 * List of buffers from our partner thread that are the components 253 * of this buffer for the I/O. Uses src_links. 254 */ 255 STAILQ_HEAD(,camdd_buf) src_list; 256 STAILQ_ENTRY(camdd_buf) src_links; 257 }; 258 259 #define NUM_DEV_TYPES 2 260 261 struct camdd_dev_pass { 262 int scsi_dev_type; 263 int protocol; 264 struct cam_device *dev; 265 uint64_t max_sector; 266 uint32_t block_len; 267 uint32_t cpi_maxio; 268 }; 269 270 typedef enum { 271 CAMDD_FILE_NONE, 272 CAMDD_FILE_REG, 273 CAMDD_FILE_STD, 274 CAMDD_FILE_PIPE, 275 CAMDD_FILE_DISK, 276 CAMDD_FILE_TAPE, 277 CAMDD_FILE_TTY, 278 CAMDD_FILE_MEM 279 } camdd_file_type; 280 281 typedef enum { 282 CAMDD_FF_NONE = 0x00, 283 CAMDD_FF_CAN_SEEK = 0x01 284 } camdd_file_flags; 285 286 struct camdd_dev_file { 287 int fd; 288 struct stat sb; 289 char filename[MAXPATHLEN + 1]; 290 camdd_file_type file_type; 291 camdd_file_flags file_flags; 292 uint8_t *tmp_buf; 293 }; 294 295 struct camdd_dev_block { 296 int fd; 297 uint64_t size_bytes; 298 uint32_t block_len; 299 }; 300 301 union camdd_dev_spec { 302 struct camdd_dev_pass pass; 303 struct camdd_dev_file file; 304 struct camdd_dev_block block; 305 }; 306 307 typedef enum { 308 CAMDD_DEV_FLAG_NONE = 0x00, 309 CAMDD_DEV_FLAG_EOF = 0x01, 310 CAMDD_DEV_FLAG_PEER_EOF = 0x02, 311 CAMDD_DEV_FLAG_ACTIVE = 0x04, 312 CAMDD_DEV_FLAG_EOF_SENT = 0x08, 313 CAMDD_DEV_FLAG_EOF_QUEUED = 0x10 314 } camdd_dev_flags; 315 316 struct camdd_dev { 317 camdd_dev_type dev_type; 318 union camdd_dev_spec dev_spec; 319 camdd_dev_flags flags; 320 char device_name[MAXPATHLEN+1]; 321 uint32_t blocksize; 322 uint32_t sector_size; 323 uint64_t max_sector; 324 uint64_t sector_io_limit; 325 int min_cmd_size; 326 int write_dev; 327 int retry_count; 328 int io_timeout; 329 int debug; 330 uint64_t start_offset_bytes; 331 uint64_t next_io_pos_bytes; 332 uint64_t next_peer_pos_bytes; 333 uint64_t next_completion_pos_bytes; 334 uint64_t peer_bytes_queued; 335 uint64_t bytes_transferred; 336 uint32_t target_queue_depth; 337 uint32_t cur_active_io; 338 uint8_t *extra_buf; 339 uint32_t extra_buf_len; 340 struct camdd_dev *peer_dev; 341 pthread_mutex_t mutex; 342 pthread_cond_t cond; 343 int kq; 344 345 int (*run)(struct camdd_dev *dev); 346 int (*fetch)(struct camdd_dev *dev); 347 348 /* 349 * Buffers that are available for I/O. Uses links. 350 */ 351 STAILQ_HEAD(,camdd_buf) free_queue; 352 353 /* 354 * Free indirect buffers. 
These are used for breaking a large
	 * buffer into multiple pieces.
	 */
	STAILQ_HEAD(,camdd_buf) free_indirect_queue;

	/*
	 * Buffers that have been queued to the kernel. Uses links.
	 */
	STAILQ_HEAD(,camdd_buf) active_queue;

	/*
	 * Will generally contain one of our buffers that is waiting for enough
	 * I/O from our partner thread to be able to execute. This will
	 * generally happen when our per-I/O-size is larger than the
	 * partner thread's per-I/O-size. Uses links.
	 */
	STAILQ_HEAD(,camdd_buf) pending_queue;

	/*
	 * Number of buffers on the pending queue.
	 */
	int num_pending_queue;

	/*
	 * Buffers that are filled and ready to execute. This is used when
	 * our partner (reader) thread sends us blocks that are larger than
	 * our blocksize, and so we have to split them into multiple pieces.
	 */
	STAILQ_HEAD(,camdd_buf) run_queue;

	/*
	 * Number of buffers on the run queue.
	 */
	int num_run_queue;

	STAILQ_HEAD(,camdd_buf) reorder_queue;

	int num_reorder_queue;

	/*
	 * Buffers that have been queued to us by our partner thread
	 * (generally the reader thread) to be written out. Uses
	 * work_links.
	 */
	STAILQ_HEAD(,camdd_buf) work_queue;

	/*
	 * Buffers that have been completed by our partner thread. Uses
	 * work_links.
	 */
	STAILQ_HEAD(,camdd_buf) peer_done_queue;

	/*
	 * Number of buffers on the peer done queue.
	 */
	uint32_t num_peer_done_queue;

	/*
	 * A list of buffers that we have queued to our peer thread. Uses
	 * links.
	 */
	STAILQ_HEAD(,camdd_buf) peer_work_queue;

	/*
	 * Number of buffers on the peer work queue.
	 */
	uint32_t num_peer_work_queue;
};

static sem_t camdd_sem;
static sig_atomic_t need_exit = 0;
static sig_atomic_t error_exit = 0;
static sig_atomic_t need_status = 0;

#ifndef min
#define	min(a, b) (((a) < (b)) ? (a) : (b))
#endif

/*
 * XXX KDM private copy of timespecsub(). This is normally defined in
 * sys/time.h, but is only enabled in the kernel. If that definition is
 * enabled in userland, it breaks the build of libnetbsd.
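 *
 * The pattern it supports here is the usual elapsed-time calculation,
 * roughly (sketch only; camdd_print_status() has the real code):
 *
 *	struct timespec now;
 *
 *	clock_gettime(CLOCK_MONOTONIC_PRECISE, &now);
 *	timespecsub(&now, start_time);
 *
 * after which "now" holds the time elapsed since *start_time.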
436 */ 437 #ifndef timespecsub 438 #define timespecsub(vvp, uvp) \ 439 do { \ 440 (vvp)->tv_sec -= (uvp)->tv_sec; \ 441 (vvp)->tv_nsec -= (uvp)->tv_nsec; \ 442 if ((vvp)->tv_nsec < 0) { \ 443 (vvp)->tv_sec--; \ 444 (vvp)->tv_nsec += 1000000000; \ 445 } \ 446 } while (0) 447 #endif 448 449 450 /* Generically useful offsets into the peripheral private area */ 451 #define ppriv_ptr0 periph_priv.entries[0].ptr 452 #define ppriv_ptr1 periph_priv.entries[1].ptr 453 #define ppriv_field0 periph_priv.entries[0].field 454 #define ppriv_field1 periph_priv.entries[1].field 455 456 #define ccb_buf ppriv_ptr0 457 458 #define CAMDD_FILE_DEFAULT_BLOCK 524288 459 #define CAMDD_FILE_DEFAULT_DEPTH 1 460 #define CAMDD_PASS_MAX_BLOCK 1048576 461 #define CAMDD_PASS_DEFAULT_DEPTH 6 462 #define CAMDD_PASS_RW_TIMEOUT 60 * 1000 463 464 static int parse_btl(char *tstr, int *bus, int *target, int *lun, 465 camdd_argmask *arglst); 466 void camdd_free_dev(struct camdd_dev *dev); 467 struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type, 468 struct kevent *new_ke, int num_ke, 469 int retry_count, int timeout); 470 static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev, 471 camdd_buf_type buf_type); 472 void camdd_release_buf(struct camdd_buf *buf); 473 struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type); 474 int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, 475 uint32_t sector_size, uint32_t *num_sectors_used, 476 int *double_buf_needed); 477 uint32_t camdd_buf_get_len(struct camdd_buf *buf); 478 void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); 479 int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 480 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); 481 int camdd_probe_pass_scsi(struct cam_device *cam_dev, union ccb *ccb, 482 camdd_argmask arglist, int probe_retry_count, 483 int probe_timeout, uint64_t *maxsector, uint32_t *block_len); 484 struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, 485 int retry_count, int timeout); 486 struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, 487 struct camdd_io_opts *io_opts, 488 camdd_argmask arglist, int probe_retry_count, 489 int probe_timeout, int io_retry_count, 490 int io_timeout); 491 void *camdd_file_worker(void *arg); 492 camdd_buf_status camdd_ccb_status(union ccb *ccb, int protocol); 493 int camdd_get_cgd(struct cam_device *device, struct ccb_getdev *cgd); 494 int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); 495 int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); 496 void camdd_peer_done(struct camdd_buf *buf); 497 void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 498 int *error_count); 499 int camdd_pass_fetch(struct camdd_dev *dev); 500 int camdd_file_run(struct camdd_dev *dev); 501 int camdd_pass_run(struct camdd_dev *dev); 502 int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len); 503 int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf); 504 void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 505 uint32_t *peer_depth, uint32_t *our_bytes, 506 uint32_t *peer_bytes); 507 void *camdd_worker(void *arg); 508 void camdd_sig_handler(int sig); 509 void camdd_print_status(struct camdd_dev *camdd_dev, 510 struct camdd_dev *other_dev, 511 struct timespec *start_time); 512 int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, 513 uint64_t max_io, int retry_count, int timeout); 514 int camdd_parse_io_opts(char *args, 
int is_write, 515 struct camdd_io_opts *io_opts); 516 void usage(void); 517 518 /* 519 * Parse out a bus, or a bus, target and lun in the following 520 * format: 521 * bus 522 * bus:target 523 * bus:target:lun 524 * 525 * Returns the number of parsed components, or 0. 526 */ 527 static int 528 parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst) 529 { 530 char *tmpstr; 531 int convs = 0; 532 533 while (isspace(*tstr) && (*tstr != '\0')) 534 tstr++; 535 536 tmpstr = (char *)strtok(tstr, ":"); 537 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 538 *bus = strtol(tmpstr, NULL, 0); 539 *arglst |= CAMDD_ARG_BUS; 540 convs++; 541 tmpstr = (char *)strtok(NULL, ":"); 542 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 543 *target = strtol(tmpstr, NULL, 0); 544 *arglst |= CAMDD_ARG_TARGET; 545 convs++; 546 tmpstr = (char *)strtok(NULL, ":"); 547 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 548 *lun = strtol(tmpstr, NULL, 0); 549 *arglst |= CAMDD_ARG_LUN; 550 convs++; 551 } 552 } 553 } 554 555 return convs; 556 } 557 558 /* 559 * XXX KDM clean up and free all of the buffers on the queue! 560 */ 561 void 562 camdd_free_dev(struct camdd_dev *dev) 563 { 564 if (dev == NULL) 565 return; 566 567 switch (dev->dev_type) { 568 case CAMDD_DEV_FILE: { 569 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 570 571 if (file_dev->fd != -1) 572 close(file_dev->fd); 573 free(file_dev->tmp_buf); 574 break; 575 } 576 case CAMDD_DEV_PASS: { 577 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 578 579 if (pass_dev->dev != NULL) 580 cam_close_device(pass_dev->dev); 581 break; 582 } 583 default: 584 break; 585 } 586 587 free(dev); 588 } 589 590 struct camdd_dev * 591 camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, 592 int retry_count, int timeout) 593 { 594 struct camdd_dev *dev = NULL; 595 struct kevent *ke; 596 size_t ke_size; 597 int retval = 0; 598 599 dev = calloc(1, sizeof(*dev)); 600 if (dev == NULL) { 601 warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev)); 602 goto bailout; 603 } 604 605 dev->dev_type = dev_type; 606 dev->io_timeout = timeout; 607 dev->retry_count = retry_count; 608 STAILQ_INIT(&dev->free_queue); 609 STAILQ_INIT(&dev->free_indirect_queue); 610 STAILQ_INIT(&dev->active_queue); 611 STAILQ_INIT(&dev->pending_queue); 612 STAILQ_INIT(&dev->run_queue); 613 STAILQ_INIT(&dev->reorder_queue); 614 STAILQ_INIT(&dev->work_queue); 615 STAILQ_INIT(&dev->peer_done_queue); 616 STAILQ_INIT(&dev->peer_work_queue); 617 retval = pthread_mutex_init(&dev->mutex, NULL); 618 if (retval != 0) { 619 warnc(retval, "%s: failed to initialize mutex", __func__); 620 goto bailout; 621 } 622 623 retval = pthread_cond_init(&dev->cond, NULL); 624 if (retval != 0) { 625 warnc(retval, "%s: failed to initialize condition variable", 626 __func__); 627 goto bailout; 628 } 629 630 dev->kq = kqueue(); 631 if (dev->kq == -1) { 632 warn("%s: Unable to create kqueue", __func__); 633 goto bailout; 634 } 635 636 ke_size = sizeof(struct kevent) * (num_ke + 4); 637 ke = calloc(1, ke_size); 638 if (ke == NULL) { 639 warn("%s: unable to malloc %zu bytes", __func__, ke_size); 640 goto bailout; 641 } 642 if (num_ke > 0) 643 bcopy(new_ke, ke, num_ke * sizeof(struct kevent)); 644 645 EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER, 646 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 647 EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER, 648 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 649 EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 650 
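	/*
	 * Note that SIGINFO and SIGINT are registered with kqueue here only
	 * so that kevent() in camdd_worker() does not return an error when
	 * one of them arrives; the signals themselves are fielded by
	 * camdd_sig_handler().
	 */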
EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 651 652 retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL); 653 if (retval == -1) { 654 warn("%s: Unable to register kevents", __func__); 655 goto bailout; 656 } 657 658 659 return (dev); 660 661 bailout: 662 free(dev); 663 664 return (NULL); 665 } 666 667 static struct camdd_buf * 668 camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 669 { 670 struct camdd_buf *buf = NULL; 671 uint8_t *data_ptr = NULL; 672 673 /* 674 * We only need to allocate data space for data buffers. 675 */ 676 switch (buf_type) { 677 case CAMDD_BUF_DATA: 678 data_ptr = malloc(dev->blocksize); 679 if (data_ptr == NULL) { 680 warn("unable to allocate %u bytes", dev->blocksize); 681 goto bailout_error; 682 } 683 break; 684 default: 685 break; 686 } 687 688 buf = calloc(1, sizeof(*buf)); 689 if (buf == NULL) { 690 warn("unable to allocate %zu bytes", sizeof(*buf)); 691 goto bailout_error; 692 } 693 694 buf->buf_type = buf_type; 695 buf->dev = dev; 696 switch (buf_type) { 697 case CAMDD_BUF_DATA: { 698 struct camdd_buf_data *data; 699 700 data = &buf->buf_type_spec.data; 701 702 data->alloc_len = dev->blocksize; 703 data->buf = data_ptr; 704 break; 705 } 706 case CAMDD_BUF_INDIRECT: 707 break; 708 default: 709 break; 710 } 711 STAILQ_INIT(&buf->src_list); 712 713 return (buf); 714 715 bailout_error: 716 free(data_ptr); 717 718 return (NULL); 719 } 720 721 void 722 camdd_release_buf(struct camdd_buf *buf) 723 { 724 struct camdd_dev *dev; 725 726 dev = buf->dev; 727 728 switch (buf->buf_type) { 729 case CAMDD_BUF_DATA: { 730 struct camdd_buf_data *data; 731 732 data = &buf->buf_type_spec.data; 733 734 if (data->segs != NULL) { 735 if (data->extra_buf != 0) { 736 void *extra_buf; 737 738 extra_buf = (void *) 739 data->segs[data->sg_count - 1].ds_addr; 740 free(extra_buf); 741 data->extra_buf = 0; 742 } 743 free(data->segs); 744 data->segs = NULL; 745 data->sg_count = 0; 746 } else if (data->iovec != NULL) { 747 if (data->extra_buf != 0) { 748 free(data->iovec[data->sg_count - 1].iov_base); 749 data->extra_buf = 0; 750 } 751 free(data->iovec); 752 data->iovec = NULL; 753 data->sg_count = 0; 754 } 755 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 756 break; 757 } 758 case CAMDD_BUF_INDIRECT: 759 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links); 760 break; 761 default: 762 err(1, "%s: Invalid buffer type %d for released buffer", 763 __func__, buf->buf_type); 764 break; 765 } 766 } 767 768 struct camdd_buf * 769 camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 770 { 771 struct camdd_buf *buf = NULL; 772 773 switch (buf_type) { 774 case CAMDD_BUF_DATA: 775 buf = STAILQ_FIRST(&dev->free_queue); 776 if (buf != NULL) { 777 struct camdd_buf_data *data; 778 uint8_t *data_ptr; 779 uint32_t alloc_len; 780 781 STAILQ_REMOVE_HEAD(&dev->free_queue, links); 782 data = &buf->buf_type_spec.data; 783 data_ptr = data->buf; 784 alloc_len = data->alloc_len; 785 bzero(buf, sizeof(*buf)); 786 data->buf = data_ptr; 787 data->alloc_len = alloc_len; 788 } 789 break; 790 case CAMDD_BUF_INDIRECT: 791 buf = STAILQ_FIRST(&dev->free_indirect_queue); 792 if (buf != NULL) { 793 STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links); 794 795 bzero(buf, sizeof(*buf)); 796 } 797 break; 798 default: 799 warnx("Unknown buffer type %d requested", buf_type); 800 break; 801 } 802 803 804 if (buf == NULL) 805 return (camdd_alloc_buf(dev, buf_type)); 806 else { 807 STAILQ_INIT(&buf->src_list); 808 buf->dev = dev; 809 buf->buf_type = buf_type; 810 811 return 
(buf); 812 } 813 } 814 815 int 816 camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, 817 uint32_t *num_sectors_used, int *double_buf_needed) 818 { 819 struct camdd_buf *tmp_buf; 820 struct camdd_buf_data *data; 821 uint8_t *extra_buf = NULL; 822 size_t extra_buf_len = 0; 823 int extra_buf_attached = 0; 824 int i, retval = 0; 825 826 data = &buf->buf_type_spec.data; 827 828 data->sg_count = buf->src_count; 829 /* 830 * Compose a scatter/gather list from all of the buffers in the list. 831 * If the length of the buffer isn't a multiple of the sector size, 832 * we'll have to add an extra buffer. This should only happen 833 * at the end of a transfer. 834 */ 835 if ((data->fill_len % sector_size) != 0) { 836 extra_buf_len = sector_size - (data->fill_len % sector_size); 837 extra_buf = calloc(extra_buf_len, 1); 838 if (extra_buf == NULL) { 839 warn("%s: unable to allocate %zu bytes for extra " 840 "buffer space", __func__, extra_buf_len); 841 retval = 1; 842 goto bailout; 843 } 844 data->extra_buf = 1; 845 data->sg_count++; 846 } 847 if (iovec == 0) { 848 data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t)); 849 if (data->segs == NULL) { 850 warn("%s: unable to allocate %zu bytes for S/G list", 851 __func__, sizeof(bus_dma_segment_t) * 852 data->sg_count); 853 retval = 1; 854 goto bailout; 855 } 856 857 } else { 858 data->iovec = calloc(data->sg_count, sizeof(struct iovec)); 859 if (data->iovec == NULL) { 860 warn("%s: unable to allocate %zu bytes for S/G list", 861 __func__, sizeof(struct iovec) * data->sg_count); 862 retval = 1; 863 goto bailout; 864 } 865 } 866 867 for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list); 868 i < buf->src_count && tmp_buf != NULL; i++, 869 tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) { 870 871 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 872 struct camdd_buf_data *tmp_data; 873 874 tmp_data = &tmp_buf->buf_type_spec.data; 875 if (iovec == 0) { 876 data->segs[i].ds_addr = 877 (bus_addr_t) tmp_data->buf; 878 data->segs[i].ds_len = tmp_data->fill_len - 879 tmp_data->resid; 880 } else { 881 data->iovec[i].iov_base = tmp_data->buf; 882 data->iovec[i].iov_len = tmp_data->fill_len - 883 tmp_data->resid; 884 } 885 if (((tmp_data->fill_len - tmp_data->resid) % 886 sector_size) != 0) 887 *double_buf_needed = 1; 888 } else { 889 struct camdd_buf_indirect *tmp_ind; 890 891 tmp_ind = &tmp_buf->buf_type_spec.indirect; 892 if (iovec == 0) { 893 data->segs[i].ds_addr = 894 (bus_addr_t)tmp_ind->start_ptr; 895 data->segs[i].ds_len = tmp_ind->len; 896 } else { 897 data->iovec[i].iov_base = tmp_ind->start_ptr; 898 data->iovec[i].iov_len = tmp_ind->len; 899 } 900 if ((tmp_ind->len % sector_size) != 0) 901 *double_buf_needed = 1; 902 } 903 } 904 905 if (extra_buf != NULL) { 906 if (iovec == 0) { 907 data->segs[i].ds_addr = (bus_addr_t)extra_buf; 908 data->segs[i].ds_len = extra_buf_len; 909 } else { 910 data->iovec[i].iov_base = extra_buf; 911 data->iovec[i].iov_len = extra_buf_len; 912 } 913 extra_buf_attached = 1; 914 i++; 915 } 916 if ((tmp_buf != NULL) || (i != data->sg_count)) { 917 warnx("buffer source count does not match " 918 "number of buffers in list!"); 919 retval = 1; 920 goto bailout; 921 } 922 923 bailout: 924 if (retval == 0) { 925 *num_sectors_used = (data->fill_len + extra_buf_len) / 926 sector_size; 927 } else if (extra_buf_attached == 0) { 928 /* 929 * If extra_buf isn't attached yet, we need to free it 930 * to avoid leaking. 
931 */ 932 free(extra_buf); 933 data->extra_buf = 0; 934 data->sg_count--; 935 } 936 return (retval); 937 } 938 939 uint32_t 940 camdd_buf_get_len(struct camdd_buf *buf) 941 { 942 uint32_t len = 0; 943 944 if (buf->buf_type != CAMDD_BUF_DATA) { 945 struct camdd_buf_indirect *indirect; 946 947 indirect = &buf->buf_type_spec.indirect; 948 len = indirect->len; 949 } else { 950 struct camdd_buf_data *data; 951 952 data = &buf->buf_type_spec.data; 953 len = data->fill_len; 954 } 955 956 return (len); 957 } 958 959 void 960 camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf) 961 { 962 struct camdd_buf_data *data; 963 964 assert(buf->buf_type == CAMDD_BUF_DATA); 965 966 data = &buf->buf_type_spec.data; 967 968 STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links); 969 buf->src_count++; 970 971 data->fill_len += camdd_buf_get_len(child_buf); 972 } 973 974 typedef enum { 975 CAMDD_TS_MAX_BLK, 976 CAMDD_TS_MIN_BLK, 977 CAMDD_TS_BLK_GRAN, 978 CAMDD_TS_EFF_IOSIZE 979 } camdd_status_item_index; 980 981 static struct camdd_status_items { 982 const char *name; 983 struct mt_status_entry *entry; 984 } req_status_items[] = { 985 { "max_blk", NULL }, 986 { "min_blk", NULL }, 987 { "blk_gran", NULL }, 988 { "max_effective_iosize", NULL } 989 }; 990 991 int 992 camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 993 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran) 994 { 995 struct mt_status_data status_data; 996 char *xml_str = NULL; 997 unsigned int i; 998 int retval = 0; 999 1000 retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str); 1001 if (retval != 0) 1002 err(1, "Couldn't get XML string from %s", filename); 1003 1004 retval = mt_get_status(xml_str, &status_data); 1005 if (retval != XML_STATUS_OK) { 1006 warn("couldn't get status for %s", filename); 1007 retval = 1; 1008 goto bailout; 1009 } else 1010 retval = 0; 1011 1012 if (status_data.error != 0) { 1013 warnx("%s", status_data.error_str); 1014 retval = 1; 1015 goto bailout; 1016 } 1017 1018 for (i = 0; i < nitems(req_status_items); i++) { 1019 char *name; 1020 1021 name = __DECONST(char *, req_status_items[i].name); 1022 req_status_items[i].entry = mt_status_entry_find(&status_data, 1023 name); 1024 if (req_status_items[i].entry == NULL) { 1025 errx(1, "Cannot find status entry %s", 1026 req_status_items[i].name); 1027 } 1028 } 1029 1030 *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned; 1031 *max_blk= req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned; 1032 *min_blk= req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned; 1033 *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned; 1034 bailout: 1035 1036 free(xml_str); 1037 mt_status_free(&status_data); 1038 1039 return (retval); 1040 } 1041 1042 struct camdd_dev * 1043 camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, 1044 int timeout) 1045 { 1046 struct camdd_dev *dev = NULL; 1047 struct camdd_dev_file *file_dev; 1048 uint64_t blocksize = io_opts->blocksize; 1049 1050 dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout); 1051 if (dev == NULL) 1052 goto bailout; 1053 1054 file_dev = &dev->dev_spec.file; 1055 file_dev->fd = fd; 1056 strlcpy(file_dev->filename, io_opts->dev_name, 1057 sizeof(file_dev->filename)); 1058 strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name)); 1059 if (blocksize == 0) 1060 dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK; 1061 else 1062 dev->blocksize = blocksize; 1063 1064 if ((io_opts->queue_depth != 0) 1065 && (io_opts->queue_depth 
	    != 1)) {
		warnx("Queue depth %ju for %s ignored, only 1 outstanding "
		    "command supported", (uintmax_t)io_opts->queue_depth,
		    io_opts->dev_name);
	}
	dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH;
	dev->run = camdd_file_run;
	dev->fetch = NULL;

	/*
	 * We can effectively access files on byte boundaries. We'll reset
	 * this for devices like disks that can be accessed on sector
	 * boundaries.
	 */
	dev->sector_size = 1;

	if ((fd != STDIN_FILENO)
	 && (fd != STDOUT_FILENO)) {
		int retval;

		retval = fstat(fd, &file_dev->sb);
		if (retval != 0) {
			warn("Cannot stat %s", dev->device_name);
			goto bailout_error;
		}
		if (S_ISREG(file_dev->sb.st_mode)) {
			file_dev->file_type = CAMDD_FILE_REG;
		} else if (S_ISCHR(file_dev->sb.st_mode)) {
			int type;

			if (ioctl(fd, FIODTYPE, &type) == -1)
				err(1, "FIODTYPE ioctl failed on %s",
				    dev->device_name);
			else {
				if (type & D_TAPE)
					file_dev->file_type = CAMDD_FILE_TAPE;
				else if (type & D_DISK)
					file_dev->file_type = CAMDD_FILE_DISK;
				else if (type & D_MEM)
					file_dev->file_type = CAMDD_FILE_MEM;
				else if (type & D_TTY)
					file_dev->file_type = CAMDD_FILE_TTY;
			}
		} else if (S_ISDIR(file_dev->sb.st_mode)) {
			errx(1, "cannot operate on directory %s",
			    dev->device_name);
		} else if (S_ISFIFO(file_dev->sb.st_mode)) {
			file_dev->file_type = CAMDD_FILE_PIPE;
		} else
			errx(1, "Cannot determine file type for %s",
			    dev->device_name);

		switch (file_dev->file_type) {
		case CAMDD_FILE_REG:
			if (file_dev->sb.st_size != 0)
				dev->max_sector = file_dev->sb.st_size - 1;
			else
				dev->max_sector = 0;
			file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
			break;
		case CAMDD_FILE_TAPE: {
			uint64_t max_iosize, max_blk, min_blk, blk_gran;
			/*
			 * Check block limits and maximum effective iosize.
			 * Make sure the blocksize is within the block
			 * limits (and a multiple of the minimum blocksize)
			 * and that the blocksize is <= maximum effective
			 * iosize.
			 */
			retval = camdd_probe_tape(fd, dev->device_name,
			    &max_iosize, &max_blk, &min_blk, &blk_gran);
			if (retval != 0)
				errx(1, "Unable to probe tape %s",
				    dev->device_name);

			/*
			 * The blocksize needs to be <= the maximum
			 * effective I/O size of the tape device. Note
			 * that this also takes into account the maximum
			 * blocksize reported by READ BLOCK LIMITS.
			 */
			if (dev->blocksize > max_iosize) {
				warnx("Blocksize %u too big for %s, limiting "
				    "to %ju", dev->blocksize, dev->device_name,
				    max_iosize);
				dev->blocksize = max_iosize;
			}

			/*
			 * The blocksize needs to be at least min_blk.
			 */
			if (dev->blocksize < min_blk) {
				warnx("Blocksize %u too small for %s, "
				    "increasing to %ju", dev->blocksize,
				    dev->device_name, min_blk);
				dev->blocksize = min_blk;
			}

			/*
			 * And the blocksize needs to be a multiple of
			 * the block granularity.
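			 * (READ BLOCK LIMITS reports the granularity as a
			 * power-of-two exponent, so a blk_gran of 2 means
			 * block lengths must be multiples of 4 bytes; a 1 MB
			 * blocksize is already aligned in that case, while a
			 * 1000001-byte one would be rounded down to 1000000.)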
			 */
			if ((blk_gran != 0)
			 && (dev->blocksize % (1 << blk_gran))) {
				warnx("Blocksize %u for %s not a multiple of "
				    "%d, adjusting to %d", dev->blocksize,
				    dev->device_name, (1 << blk_gran),
				    dev->blocksize & ~((1 << blk_gran) - 1));
				dev->blocksize &= ~((1 << blk_gran) - 1);
			}

			if (dev->blocksize == 0) {
				errx(1, "Unable to derive valid blocksize for "
				    "%s", dev->device_name);
			}

			/*
			 * For tape drives, set the sector size to the
			 * blocksize so that we make sure not to write
			 * less than the blocksize out to the drive.
			 */
			dev->sector_size = dev->blocksize;
			break;
		}
		case CAMDD_FILE_DISK: {
			off_t media_size;
			unsigned int sector_size;

			file_dev->file_flags |= CAMDD_FF_CAN_SEEK;

			if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) {
				err(1, "DIOCGSECTORSIZE ioctl failed on %s",
				    dev->device_name);
			}

			if (sector_size == 0) {
				errx(1, "DIOCGSECTORSIZE ioctl returned "
				    "invalid sector size %u for %s",
				    sector_size, dev->device_name);
			}

			if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) {
				err(1, "DIOCGMEDIASIZE ioctl failed on %s",
				    dev->device_name);
			}

			if (media_size == 0) {
				errx(1, "DIOCGMEDIASIZE ioctl returned "
				    "invalid media size %ju for %s",
				    (uintmax_t)media_size, dev->device_name);
			}

			if (dev->blocksize % sector_size) {
				errx(1, "%s blocksize %u not a multiple of "
				    "sector size %u", dev->device_name,
				    dev->blocksize, sector_size);
			}

			dev->sector_size = sector_size;
			dev->max_sector = (media_size / sector_size) - 1;
			break;
		}
		case CAMDD_FILE_MEM:
			file_dev->file_flags |= CAMDD_FF_CAN_SEEK;
			break;
		default:
			break;
		}
	}

	if ((io_opts->offset != 0)
	 && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) {
		warnx("Offset %ju specified for %s, but we cannot seek on %s",
		    io_opts->offset, io_opts->dev_name, io_opts->dev_name);
		goto bailout_error;
	}
#if 0
	else if ((io_opts->offset != 0)
		&& ((io_opts->offset % dev->sector_size) != 0)) {
		warnx("Offset %ju for %s is not a multiple of the "
		      "sector size %u", io_opts->offset,
		      io_opts->dev_name, dev->sector_size);
		goto bailout_error;
	} else {
		dev->start_offset_bytes = io_opts->offset;
	}
#endif

bailout:
	return (dev);

bailout_error:
	camdd_free_dev(dev);
	return (NULL);
}

/*
 * Get a get device CCB for the specified device.
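 *
 * The fields camdd cares about are the protocol and (for SCSI) the inquiry
 * data; a caller might use it roughly like this (sketch only,
 * camdd_probe_pass() below is the real consumer):
 *
 *	struct ccb_getdev cgd;
 *
 *	if (camdd_get_cgd(cam_dev, &cgd) != 0)
 *		return (NULL);
 *	if (cgd.protocol == PROTO_SCSI)
 *		scsi_dev_type = SID_TYPE(&cam_dev->inq_data);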
1263 */ 1264 int 1265 camdd_get_cgd(struct cam_device *device, struct ccb_getdev *cgd) 1266 { 1267 union ccb *ccb; 1268 int retval = 0; 1269 1270 ccb = cam_getccb(device); 1271 1272 if (ccb == NULL) { 1273 warnx("%s: couldn't allocate CCB", __func__); 1274 return -1; 1275 } 1276 1277 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cgd); 1278 1279 ccb->ccb_h.func_code = XPT_GDEV_TYPE; 1280 1281 if (cam_send_ccb(device, ccb) < 0) { 1282 warn("%s: error sending Get Device Information CCB", __func__); 1283 cam_error_print(device, ccb, CAM_ESF_ALL, 1284 CAM_EPF_ALL, stderr); 1285 retval = -1; 1286 goto bailout; 1287 } 1288 1289 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1290 cam_error_print(device, ccb, CAM_ESF_ALL, 1291 CAM_EPF_ALL, stderr); 1292 retval = -1; 1293 goto bailout; 1294 } 1295 1296 bcopy(&ccb->cgd, cgd, sizeof(struct ccb_getdev)); 1297 1298 bailout: 1299 cam_freeccb(ccb); 1300 1301 return retval; 1302 } 1303 1304 int 1305 camdd_probe_pass_scsi(struct cam_device *cam_dev, union ccb *ccb, 1306 camdd_argmask arglist, int probe_retry_count, 1307 int probe_timeout, uint64_t *maxsector, uint32_t *block_len) 1308 { 1309 struct scsi_read_capacity_data rcap; 1310 struct scsi_read_capacity_data_long rcaplong; 1311 int retval = -1; 1312 1313 if (ccb == NULL) { 1314 warnx("%s: error passed ccb is NULL", __func__); 1315 goto bailout; 1316 } 1317 1318 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); 1319 1320 scsi_read_capacity(&ccb->csio, 1321 /*retries*/ probe_retry_count, 1322 /*cbfcnp*/ NULL, 1323 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1324 &rcap, 1325 SSD_FULL_SIZE, 1326 /*timeout*/ probe_timeout ? probe_timeout : 5000); 1327 1328 /* Disable freezing the device queue */ 1329 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1330 1331 if (arglist & CAMDD_ARG_ERR_RECOVER) 1332 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1333 1334 if (cam_send_ccb(cam_dev, ccb) < 0) { 1335 warn("error sending READ CAPACITY command"); 1336 1337 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1338 CAM_EPF_ALL, stderr); 1339 1340 goto bailout; 1341 } 1342 1343 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1344 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1345 goto bailout; 1346 } 1347 1348 *maxsector = scsi_4btoul(rcap.addr); 1349 *block_len = scsi_4btoul(rcap.length); 1350 1351 /* 1352 * A last block of 2^32-1 means that the true capacity is over 2TB, 1353 * and we need to issue the long READ CAPACITY to get the real 1354 * capacity. Otherwise, we're all set. 1355 */ 1356 if (*maxsector != 0xffffffff) { 1357 retval = 0; 1358 goto bailout; 1359 } 1360 1361 scsi_read_capacity_16(&ccb->csio, 1362 /*retries*/ probe_retry_count, 1363 /*cbfcnp*/ NULL, 1364 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1365 /*lba*/ 0, 1366 /*reladdr*/ 0, 1367 /*pmi*/ 0, 1368 (uint8_t *)&rcaplong, 1369 sizeof(rcaplong), 1370 /*sense_len*/ SSD_FULL_SIZE, 1371 /*timeout*/ probe_timeout ? 
probe_timeout : 5000); 1372 1373 /* Disable freezing the device queue */ 1374 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1375 1376 if (arglist & CAMDD_ARG_ERR_RECOVER) 1377 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1378 1379 if (cam_send_ccb(cam_dev, ccb) < 0) { 1380 warn("error sending READ CAPACITY (16) command"); 1381 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1382 CAM_EPF_ALL, stderr); 1383 goto bailout; 1384 } 1385 1386 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1387 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1388 goto bailout; 1389 } 1390 1391 *maxsector = scsi_8btou64(rcaplong.addr); 1392 *block_len = scsi_4btoul(rcaplong.length); 1393 1394 retval = 0; 1395 1396 bailout: 1397 return retval; 1398 } 1399 1400 /* 1401 * Need to implement this. Do a basic probe: 1402 * - Check the inquiry data, make sure we're talking to a device that we 1403 * can reasonably expect to talk to -- direct, RBC, CD, WORM. 1404 * - Send a test unit ready, make sure the device is available. 1405 * - Get the capacity and block size. 1406 */ 1407 struct camdd_dev * 1408 camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, 1409 camdd_argmask arglist, int probe_retry_count, 1410 int probe_timeout, int io_retry_count, int io_timeout) 1411 { 1412 union ccb *ccb; 1413 uint64_t maxsector = 0; 1414 uint32_t cpi_maxio, max_iosize, pass_numblocks; 1415 uint32_t block_len = 0; 1416 struct camdd_dev *dev = NULL; 1417 struct camdd_dev_pass *pass_dev; 1418 struct kevent ke; 1419 struct ccb_getdev cgd; 1420 int retval; 1421 int scsi_dev_type; 1422 1423 if ((retval = camdd_get_cgd(cam_dev, &cgd)) != 0) { 1424 warnx("%s: error retrieving CGD", __func__); 1425 return NULL; 1426 } 1427 1428 ccb = cam_getccb(cam_dev); 1429 1430 if (ccb == NULL) { 1431 warnx("%s: error allocating ccb", __func__); 1432 goto bailout; 1433 } 1434 1435 switch (cgd.protocol) { 1436 case PROTO_SCSI: 1437 scsi_dev_type = SID_TYPE(&cam_dev->inq_data); 1438 1439 /* 1440 * For devices that support READ CAPACITY, we'll attempt to get the 1441 * capacity. Otherwise, we really don't support tape or other 1442 * devices via SCSI passthrough, so just return an error in that case. 
	 */
		switch (scsi_dev_type) {
		case T_DIRECT:
		case T_WORM:
		case T_CDROM:
		case T_OPTICAL:
		case T_RBC:
		case T_ZBC_HM:
			break;
		default:
			errx(1, "Unsupported SCSI device type %d", scsi_dev_type);
			break; /*NOTREACHED*/
		}

		if ((retval = camdd_probe_pass_scsi(cam_dev, ccb, arglist,
						probe_retry_count,
						probe_timeout, &maxsector,
						&block_len))) {
			goto bailout;
		}
		break;
	default:
		errx(1, "Unsupported PROTO type %d", cgd.protocol);
		break; /*NOTREACHED*/
	}

	if (block_len == 0) {
		warnx("Sector size for %s%u is 0, cannot continue",
		    cam_dev->device_name, cam_dev->dev_unit_num);
		goto bailout_error;
	}

	CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi);

	ccb->ccb_h.func_code = XPT_PATH_INQ;
	ccb->ccb_h.flags = CAM_DIR_NONE;
	ccb->ccb_h.retry_count = 1;

	if (cam_send_ccb(cam_dev, ccb) < 0) {
		warn("error sending XPT_PATH_INQ CCB");

		cam_error_print(cam_dev, ccb, CAM_ESF_ALL,
		    CAM_EPF_ALL, stderr);
		goto bailout;
	}

	EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0);

	dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count,
	    io_timeout);
	if (dev == NULL)
		goto bailout;

	pass_dev = &dev->dev_spec.pass;
	pass_dev->scsi_dev_type = scsi_dev_type;
	pass_dev->protocol = cgd.protocol;
	pass_dev->dev = cam_dev;
	pass_dev->max_sector = maxsector;
	pass_dev->block_len = block_len;
	pass_dev->cpi_maxio = ccb->cpi.maxio;
	snprintf(dev->device_name, sizeof(dev->device_name), "%s%u",
	    pass_dev->dev->device_name, pass_dev->dev->dev_unit_num);
	dev->sector_size = block_len;
	dev->max_sector = maxsector;

	/*
	 * Determine the optimal blocksize to use for this device.
	 */

	/*
	 * If the controller has not specified a maximum I/O size,
	 * just go with 128K as a somewhat conservative value.
	 */
	if (pass_dev->cpi_maxio == 0)
		cpi_maxio = 131072;
	else
		cpi_maxio = pass_dev->cpi_maxio;

	/*
	 * If the controller has a large maximum I/O size, limit it
	 * to something smaller so that the kernel doesn't have trouble
	 * allocating buffers to copy data in and out for us.
	 * XXX KDM this is until we have unmapped I/O support in the kernel.
	 */
	max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK);

	/*
	 * If we weren't able to get a block size for some reason,
	 * default to 512 bytes.
	 */
	block_len = pass_dev->block_len;
	if (block_len == 0)
		block_len = 512;

	/*
	 * Figure out how many blocksize chunks will fit in the
	 * maximum I/O size.
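	 * For example, with the default 512-byte LBA size and the 1 MB
	 * CAMDD_PASS_MAX_BLOCK cap, that works out to 2048 blocks and a
	 * 1 MB default blocksize for the pass(4) device.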
	 */
	pass_numblocks = max_iosize / block_len;

	/*
	 * And finally, multiply the number of blocks by the LBA
	 * length to get our maximum block size.
	 */
	dev->blocksize = pass_numblocks * block_len;

	if (io_opts->blocksize != 0) {
		if ((io_opts->blocksize % dev->sector_size) != 0) {
			warnx("Blocksize %ju for %s is not a multiple of "
			    "sector size %u", (uintmax_t)io_opts->blocksize,
			    dev->device_name, dev->sector_size);
			goto bailout_error;
		}
		dev->blocksize = io_opts->blocksize;
	}
	dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH;
	if (io_opts->queue_depth != 0)
		dev->target_queue_depth = io_opts->queue_depth;

	if (io_opts->offset != 0) {
		if (io_opts->offset > (dev->max_sector * dev->sector_size)) {
			warnx("Offset %ju is past the end of device %s",
			    io_opts->offset, dev->device_name);
			goto bailout_error;
		}
#if 0
		else if ((io_opts->offset % dev->sector_size) != 0) {
			warnx("Offset %ju for %s is not a multiple of the "
			      "sector size %u", io_opts->offset,
			      dev->device_name, dev->sector_size);
			goto bailout_error;
		}
		dev->start_offset_bytes = io_opts->offset;
#endif
	}

	dev->min_cmd_size = io_opts->min_cmd_size;

	dev->run = camdd_pass_run;
	dev->fetch = camdd_pass_fetch;

bailout:
	cam_freeccb(ccb);

	return (dev);

bailout_error:
	cam_freeccb(ccb);

	camdd_free_dev(dev);

	return (NULL);
}

void *
camdd_worker(void *arg)
{
	struct camdd_dev *dev = arg;
	struct camdd_buf *buf;
	struct timespec ts, *kq_ts;

	ts.tv_sec = 0;
	ts.tv_nsec = 0;

	pthread_mutex_lock(&dev->mutex);

	dev->flags |= CAMDD_DEV_FLAG_ACTIVE;

	for (;;) {
		struct kevent ke;
		int retval = 0;

		/*
		 * XXX KDM check the reorder queue depth?
		 */
		if (dev->write_dev == 0) {
			uint32_t our_depth, peer_depth, peer_bytes, our_bytes;
			uint32_t target_depth = dev->target_queue_depth;
			uint32_t peer_target_depth =
			    dev->peer_dev->target_queue_depth;
			uint32_t peer_blocksize = dev->peer_dev->blocksize;

			camdd_get_depth(dev, &our_depth, &peer_depth,
			    &our_bytes, &peer_bytes);

#if 0
			while (((our_depth < target_depth)
			     && (peer_depth < peer_target_depth))
			    || ((peer_bytes + our_bytes) <
				(peer_blocksize * 2))) {
#endif
			while (((our_depth + peer_depth) <
			        (target_depth + peer_target_depth))
			    || ((peer_bytes + our_bytes) <
				(peer_blocksize * 3))) {

				retval = camdd_queue(dev, NULL);
				if (retval == 1)
					break;
				else if (retval != 0) {
					error_exit = 1;
					goto bailout;
				}

				camdd_get_depth(dev, &our_depth, &peer_depth,
				    &our_bytes, &peer_bytes);
			}
		}
		/*
		 * See if we have any I/O that is ready to execute.
		 */
		buf = STAILQ_FIRST(&dev->run_queue);
		if (buf != NULL) {
			while (dev->target_queue_depth > dev->cur_active_io) {
				retval = dev->run(dev);
				if (retval == -1) {
					dev->flags |= CAMDD_DEV_FLAG_EOF;
					error_exit = 1;
					break;
				} else if (retval != 0) {
					break;
				}
			}
		}

		/*
		 * We've reached EOF, or our partner has reached EOF.
		 */
		if ((dev->flags & CAMDD_DEV_FLAG_EOF)
		 || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) {
			if (dev->write_dev != 0) {
				if ((STAILQ_EMPTY(&dev->work_queue))
				 && (dev->num_run_queue == 0)
				 && (dev->cur_active_io == 0)) {
					goto bailout;
				}
			} else {
				/*
				 * If we're the reader, and the writer
				 * got EOF, he is already done. If we got
				 * the EOF, then we need to wait until
				 * everything is flushed out for the writer.
				 */
				if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) {
					goto bailout;
				} else if ((dev->num_peer_work_queue == 0)
					&& (dev->num_peer_done_queue == 0)
					&& (dev->cur_active_io == 0)
					&& (dev->num_run_queue == 0)) {
					goto bailout;
				}
			}
			/*
			 * XXX KDM need to do something about the pending
			 * queue and cleanup resources.
			 */
		}

		if ((dev->write_dev == 0)
		 && (dev->cur_active_io == 0)
		 && (dev->peer_bytes_queued < dev->peer_dev->blocksize))
			kq_ts = &ts;
		else
			kq_ts = NULL;

		/*
		 * Run kevent to see if there are events to process.
		 */
		pthread_mutex_unlock(&dev->mutex);
		retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts);
		pthread_mutex_lock(&dev->mutex);
		if (retval == -1) {
			warn("%s: error returned from kevent", __func__);
			goto bailout;
		} else if (retval != 0) {
			switch (ke.filter) {
			case EVFILT_READ:
				if (dev->fetch != NULL) {
					retval = dev->fetch(dev);
					if (retval == -1) {
						error_exit = 1;
						goto bailout;
					}
				}
				break;
			case EVFILT_SIGNAL:
				/*
				 * We register for this so we don't get
				 * an error as a result of a SIGINFO or a
				 * SIGINT. It will actually get handled
				 * by the signal handler. If we get a
				 * SIGINT, bail out without printing an
				 * error message. Any other signals
				 * will result in the error message above.
				 */
				if (ke.ident == SIGINT)
					goto bailout;
				break;
			case EVFILT_USER:
				retval = 0;
				/*
				 * Check to see if the other thread has
				 * queued any I/O for us to do. (In this
				 * case we're the writer.)
				 */
				for (buf = STAILQ_FIRST(&dev->work_queue);
				     buf != NULL;
				     buf = STAILQ_FIRST(&dev->work_queue)) {
					STAILQ_REMOVE_HEAD(&dev->work_queue,
					    work_links);
					retval = camdd_queue(dev, buf);
					/*
					 * We keep going unless we get an
					 * actual error. If we get EOF, we
					 * still want to remove the buffers
					 * from the queue and send them back
					 * to the reader thread.
					 */
					if (retval == -1) {
						error_exit = 1;
						goto bailout;
					} else
						retval = 0;
				}

				/*
				 * Next check to see if the other thread has
				 * queued any completed buffers back to us.
				 * (In this case we're the reader.)
				 */
				for (buf = STAILQ_FIRST(&dev->peer_done_queue);
				     buf != NULL;
				     buf = STAILQ_FIRST(&dev->peer_done_queue)){
					STAILQ_REMOVE_HEAD(
					    &dev->peer_done_queue, work_links);
					dev->num_peer_done_queue--;
					camdd_peer_done(buf);
				}
				break;
			default:
				warnx("%s: unknown kevent filter %d",
				    __func__, ke.filter);
				break;
			}
		}
	}

bailout:

	dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE;

	/* XXX KDM cleanup resources here? */

	pthread_mutex_unlock(&dev->mutex);

	need_exit = 1;
	sem_post(&camdd_sem);

	return (NULL);
}

/*
 * Simplistic translation of CCB status to our local status.
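 *
 * Roughly: a completed CCB with no residual maps to CAMDD_STATUS_OK, a
 * partial transfer to CAMDD_STATUS_SHORT_IO, and a transfer with nothing
 * moved to CAMDD_STATUS_EOF; benign SCSI statuses (GOOD, CONDITION MET,
 * INTERMEDIATE) count as OK, and everything else is CAMDD_STATUS_ERROR.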
1806 */ 1807 camdd_buf_status 1808 camdd_ccb_status(union ccb *ccb, int protocol) 1809 { 1810 camdd_buf_status status = CAMDD_STATUS_NONE; 1811 cam_status ccb_status; 1812 1813 ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; 1814 1815 switch (protocol) { 1816 case PROTO_SCSI: 1817 switch (ccb_status) { 1818 case CAM_REQ_CMP: { 1819 if (ccb->csio.resid == 0) { 1820 status = CAMDD_STATUS_OK; 1821 } else if (ccb->csio.dxfer_len > ccb->csio.resid) { 1822 status = CAMDD_STATUS_SHORT_IO; 1823 } else { 1824 status = CAMDD_STATUS_EOF; 1825 } 1826 break; 1827 } 1828 case CAM_SCSI_STATUS_ERROR: { 1829 switch (ccb->csio.scsi_status) { 1830 case SCSI_STATUS_OK: 1831 case SCSI_STATUS_COND_MET: 1832 case SCSI_STATUS_INTERMED: 1833 case SCSI_STATUS_INTERMED_COND_MET: 1834 status = CAMDD_STATUS_OK; 1835 break; 1836 case SCSI_STATUS_CMD_TERMINATED: 1837 case SCSI_STATUS_CHECK_COND: 1838 case SCSI_STATUS_QUEUE_FULL: 1839 case SCSI_STATUS_BUSY: 1840 case SCSI_STATUS_RESERV_CONFLICT: 1841 default: 1842 status = CAMDD_STATUS_ERROR; 1843 break; 1844 } 1845 break; 1846 } 1847 default: 1848 status = CAMDD_STATUS_ERROR; 1849 break; 1850 } 1851 break; 1852 default: 1853 status = CAMDD_STATUS_ERROR; 1854 break; 1855 } 1856 1857 return (status); 1858 } 1859 1860 /* 1861 * Queue a buffer to our peer's work thread for writing. 1862 * 1863 * Returns 0 for success, -1 for failure, 1 if the other thread exited. 1864 */ 1865 int 1866 camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf) 1867 { 1868 struct kevent ke; 1869 STAILQ_HEAD(, camdd_buf) local_queue; 1870 struct camdd_buf *buf1, *buf2; 1871 struct camdd_buf_data *data = NULL; 1872 uint64_t peer_bytes_queued = 0; 1873 int active = 1; 1874 int retval = 0; 1875 1876 STAILQ_INIT(&local_queue); 1877 1878 /* 1879 * Since we're the reader, we need to queue our I/O to the writer 1880 * in sequential order in order to make sure it gets written out 1881 * in sequential order. 1882 * 1883 * Check the next expected I/O starting offset. If this doesn't 1884 * match, put it on the reorder queue. 1885 */ 1886 if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) { 1887 1888 /* 1889 * If there is nothing on the queue, there is no sorting 1890 * needed. 1891 */ 1892 if (STAILQ_EMPTY(&dev->reorder_queue)) { 1893 STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); 1894 dev->num_reorder_queue++; 1895 goto bailout; 1896 } 1897 1898 /* 1899 * Sort in ascending order by starting LBA. There should 1900 * be no identical LBAs. 1901 */ 1902 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1903 buf1 = buf2) { 1904 buf2 = STAILQ_NEXT(buf1, links); 1905 if (buf->lba < buf1->lba) { 1906 /* 1907 * If we're less than the first one, then 1908 * we insert at the head of the list 1909 * because this has to be the first element 1910 * on the list. 1911 */ 1912 STAILQ_INSERT_HEAD(&dev->reorder_queue, 1913 buf, links); 1914 dev->num_reorder_queue++; 1915 break; 1916 } else if (buf->lba > buf1->lba) { 1917 if (buf2 == NULL) { 1918 STAILQ_INSERT_TAIL(&dev->reorder_queue, 1919 buf, links); 1920 dev->num_reorder_queue++; 1921 break; 1922 } else if (buf->lba < buf2->lba) { 1923 STAILQ_INSERT_AFTER(&dev->reorder_queue, 1924 buf1, buf, links); 1925 dev->num_reorder_queue++; 1926 break; 1927 } 1928 } else { 1929 errx(1, "Found buffers with duplicate LBA %ju!", 1930 buf->lba); 1931 } 1932 } 1933 goto bailout; 1934 } else { 1935 1936 /* 1937 * We're the next expected I/O completion, so put ourselves 1938 * on the local queue to be sent to the writer. 
We use 1939 * work_links here so that we can queue this to the 1940 * peer_work_queue before taking the buffer off of the 1941 * local_queue. 1942 */ 1943 dev->next_completion_pos_bytes += buf->len; 1944 STAILQ_INSERT_TAIL(&local_queue, buf, work_links); 1945 1946 /* 1947 * Go through the reorder queue looking for more sequential 1948 * I/O and add it to the local queue. 1949 */ 1950 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1951 buf1 = STAILQ_FIRST(&dev->reorder_queue)) { 1952 /* 1953 * As soon as we see an I/O that is out of sequence, 1954 * we're done. 1955 */ 1956 if ((buf1->lba * dev->sector_size) != 1957 dev->next_completion_pos_bytes) 1958 break; 1959 1960 STAILQ_REMOVE_HEAD(&dev->reorder_queue, links); 1961 dev->num_reorder_queue--; 1962 STAILQ_INSERT_TAIL(&local_queue, buf1, work_links); 1963 dev->next_completion_pos_bytes += buf1->len; 1964 } 1965 } 1966 1967 /* 1968 * Setup the event to let the other thread know that it has work 1969 * pending. 1970 */ 1971 EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0, 1972 NOTE_TRIGGER, 0, NULL); 1973 1974 /* 1975 * Put this on our shadow queue so that we know what we've queued 1976 * to the other thread. 1977 */ 1978 STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) { 1979 if (buf1->buf_type != CAMDD_BUF_DATA) { 1980 errx(1, "%s: should have a data buffer, not an " 1981 "indirect buffer", __func__); 1982 } 1983 data = &buf1->buf_type_spec.data; 1984 1985 /* 1986 * We only need to send one EOF to the writer, and don't 1987 * need to continue sending EOFs after that. 1988 */ 1989 if (buf1->status == CAMDD_STATUS_EOF) { 1990 if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) { 1991 STAILQ_REMOVE(&local_queue, buf1, camdd_buf, 1992 work_links); 1993 camdd_release_buf(buf1); 1994 retval = 1; 1995 continue; 1996 } 1997 dev->flags |= CAMDD_DEV_FLAG_EOF_SENT; 1998 } 1999 2000 2001 STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links); 2002 peer_bytes_queued += (data->fill_len - data->resid); 2003 dev->peer_bytes_queued += (data->fill_len - data->resid); 2004 dev->num_peer_work_queue++; 2005 } 2006 2007 if (STAILQ_FIRST(&local_queue) == NULL) 2008 goto bailout; 2009 2010 /* 2011 * Drop our mutex and pick up the other thread's mutex. We need to 2012 * do this to avoid deadlocks. 2013 */ 2014 pthread_mutex_unlock(&dev->mutex); 2015 pthread_mutex_lock(&dev->peer_dev->mutex); 2016 2017 if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) { 2018 /* 2019 * Put the buffers on the other thread's incoming work queue. 2020 */ 2021 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 2022 buf1 = STAILQ_FIRST(&local_queue)) { 2023 STAILQ_REMOVE_HEAD(&local_queue, work_links); 2024 STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1, 2025 work_links); 2026 } 2027 /* 2028 * Send an event to the other thread's kqueue to let it know 2029 * that there is something on the work queue. 2030 */ 2031 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 2032 if (retval == -1) 2033 warn("%s: unable to add peer work_queue kevent", 2034 __func__); 2035 else 2036 retval = 0; 2037 } else 2038 active = 0; 2039 2040 pthread_mutex_unlock(&dev->peer_dev->mutex); 2041 pthread_mutex_lock(&dev->mutex); 2042 2043 /* 2044 * If the other side isn't active, run through the queue and 2045 * release all of the buffers. 
2046 */ 2047 if (active == 0) { 2048 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 2049 buf1 = STAILQ_FIRST(&local_queue)) { 2050 STAILQ_REMOVE_HEAD(&local_queue, work_links); 2051 STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf, 2052 links); 2053 dev->num_peer_work_queue--; 2054 camdd_release_buf(buf1); 2055 } 2056 dev->peer_bytes_queued -= peer_bytes_queued; 2057 retval = 1; 2058 } 2059 2060 bailout: 2061 return (retval); 2062 } 2063 2064 /* 2065 * Return a buffer to the reader thread when we have completed writing it. 2066 */ 2067 int 2068 camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf) 2069 { 2070 struct kevent ke; 2071 int retval = 0; 2072 2073 /* 2074 * Setup the event to let the other thread know that we have 2075 * completed a buffer. 2076 */ 2077 EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0, 2078 NOTE_TRIGGER, 0, NULL); 2079 2080 /* 2081 * Drop our lock and acquire the other thread's lock before 2082 * manipulating 2083 */ 2084 pthread_mutex_unlock(&dev->mutex); 2085 pthread_mutex_lock(&dev->peer_dev->mutex); 2086 2087 /* 2088 * Put the buffer on the reader thread's peer done queue now that 2089 * we have completed it. 2090 */ 2091 STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf, 2092 work_links); 2093 dev->peer_dev->num_peer_done_queue++; 2094 2095 /* 2096 * Send an event to the peer thread to let it know that we've added 2097 * something to its peer done queue. 2098 */ 2099 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 2100 if (retval == -1) 2101 warn("%s: unable to add peer_done_queue kevent", __func__); 2102 else 2103 retval = 0; 2104 2105 /* 2106 * Drop the other thread's lock and reacquire ours. 2107 */ 2108 pthread_mutex_unlock(&dev->peer_dev->mutex); 2109 pthread_mutex_lock(&dev->mutex); 2110 2111 return (retval); 2112 } 2113 2114 /* 2115 * Free a buffer that was written out by the writer thread and returned to 2116 * the reader thread. 2117 */ 2118 void 2119 camdd_peer_done(struct camdd_buf *buf) 2120 { 2121 struct camdd_dev *dev; 2122 struct camdd_buf_data *data; 2123 2124 dev = buf->dev; 2125 if (buf->buf_type != CAMDD_BUF_DATA) { 2126 errx(1, "%s: should have a data buffer, not an " 2127 "indirect buffer", __func__); 2128 } 2129 2130 data = &buf->buf_type_spec.data; 2131 2132 STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links); 2133 dev->num_peer_work_queue--; 2134 dev->peer_bytes_queued -= (data->fill_len - data->resid); 2135 2136 if (buf->status == CAMDD_STATUS_EOF) 2137 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2138 2139 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2140 } 2141 2142 /* 2143 * Assumes caller holds the lock for this device. 2144 */ 2145 void 2146 camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 2147 int *error_count) 2148 { 2149 int retval = 0; 2150 2151 /* 2152 * If we're the reader, we need to send the completed I/O 2153 * to the writer. If we're the writer, we need to just 2154 * free up resources, or let the reader know if we've 2155 * encountered an error. 
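	 *
	 * (On the writer side a completed buffer may be backed by indirect
	 * buffers that reference the reader's data buffers; each source
	 * buffer is only handed back via camdd_complete_peer_buf() once its
	 * reference count drops to zero, as the loop below does.)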
2156 */ 2157 if (dev->write_dev == 0) { 2158 retval = camdd_queue_peer_buf(dev, buf); 2159 if (retval != 0) 2160 (*error_count)++; 2161 } else { 2162 struct camdd_buf *tmp_buf, *next_buf; 2163 2164 STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links, 2165 next_buf) { 2166 struct camdd_buf *src_buf; 2167 struct camdd_buf_indirect *indirect; 2168 2169 STAILQ_REMOVE(&buf->src_list, tmp_buf, 2170 camdd_buf, src_links); 2171 2172 tmp_buf->status = buf->status; 2173 2174 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 2175 camdd_complete_peer_buf(dev, tmp_buf); 2176 continue; 2177 } 2178 2179 indirect = &tmp_buf->buf_type_spec.indirect; 2180 src_buf = indirect->src_buf; 2181 src_buf->refcount--; 2182 /* 2183 * XXX KDM we probably need to account for 2184 * exactly how many bytes we were able to 2185 * write. Allocate the residual to the 2186 * first N buffers? Or just track the 2187 * number of bytes written? Right now the reader 2188 * doesn't do anything with a residual. 2189 */ 2190 src_buf->status = buf->status; 2191 if (src_buf->refcount <= 0) 2192 camdd_complete_peer_buf(dev, src_buf); 2193 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, 2194 tmp_buf, links); 2195 } 2196 2197 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2198 } 2199 } 2200 2201 /* 2202 * Fetch all completed commands from the pass(4) device. 2203 * 2204 * Returns the number of commands received, or -1 if any of the commands 2205 * completed with an error. Returns 0 if no commands are available. 2206 */ 2207 int 2208 camdd_pass_fetch(struct camdd_dev *dev) 2209 { 2210 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 2211 union ccb ccb; 2212 int retval = 0, num_fetched = 0, error_count = 0; 2213 2214 pthread_mutex_unlock(&dev->mutex); 2215 /* 2216 * XXX KDM we don't distinguish between EFAULT and ENOENT. 2217 */ 2218 while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) { 2219 struct camdd_buf *buf; 2220 struct camdd_buf_data *data; 2221 cam_status ccb_status; 2222 union ccb *buf_ccb; 2223 2224 buf = ccb.ccb_h.ccb_buf; 2225 data = &buf->buf_type_spec.data; 2226 buf_ccb = &data->ccb; 2227 2228 num_fetched++; 2229 2230 /* 2231 * Copy the CCB back out so we get status, sense data, etc. 2232 */ 2233 bcopy(&ccb, buf_ccb, sizeof(ccb)); 2234 2235 pthread_mutex_lock(&dev->mutex); 2236 2237 /* 2238 * We're now done, so take this off the active queue. 2239 */ 2240 STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links); 2241 dev->cur_active_io--; 2242 2243 ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK; 2244 if (ccb_status != CAM_REQ_CMP) { 2245 cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL, 2246 CAM_EPF_ALL, stderr); 2247 } 2248 2249 switch (pass_dev->protocol) { 2250 case PROTO_SCSI: 2251 data->resid = ccb.csio.resid; 2252 dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); 2253 break; 2254 default: 2255 return -1; 2256 break; 2257 } 2258 2259 if (buf->status == CAMDD_STATUS_NONE) 2260 buf->status = camdd_ccb_status(&ccb, pass_dev->protocol); 2261 if (buf->status == CAMDD_STATUS_ERROR) 2262 error_count++; 2263 else if (buf->status == CAMDD_STATUS_EOF) { 2264 /* 2265 * Once we queue this buffer to our partner thread, 2266 * he will know that we've hit EOF. 2267 */ 2268 dev->flags |= CAMDD_DEV_FLAG_EOF; 2269 } 2270 2271 camdd_complete_buf(dev, buf, &error_count); 2272 2273 /* 2274 * Unlock in preparation for the ioctl call. 
2275 */ 2276 pthread_mutex_unlock(&dev->mutex); 2277 } 2278 2279 pthread_mutex_lock(&dev->mutex); 2280 2281 if (error_count > 0) 2282 return (-1); 2283 else 2284 return (num_fetched); 2285 } 2286 2287 /* 2288 * Returns -1 for error, 0 for success/continue, and 1 for resource 2289 * shortage/stop processing. 2290 */ 2291 int 2292 camdd_file_run(struct camdd_dev *dev) 2293 { 2294 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 2295 struct camdd_buf_data *data; 2296 struct camdd_buf *buf; 2297 off_t io_offset; 2298 int retval = 0, write_dev = dev->write_dev; 2299 int error_count = 0, no_resources = 0, double_buf_needed = 0; 2300 uint32_t num_sectors = 0, db_len = 0; 2301 2302 buf = STAILQ_FIRST(&dev->run_queue); 2303 if (buf == NULL) { 2304 no_resources = 1; 2305 goto bailout; 2306 } else if ((dev->write_dev == 0) 2307 && (dev->flags & (CAMDD_DEV_FLAG_EOF | 2308 CAMDD_DEV_FLAG_EOF_SENT))) { 2309 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2310 dev->num_run_queue--; 2311 buf->status = CAMDD_STATUS_EOF; 2312 error_count++; 2313 goto bailout; 2314 } 2315 2316 /* 2317 * If we're writing, we need to go through the source buffer list 2318 * and create an S/G list. 2319 */ 2320 if (write_dev != 0) { 2321 retval = camdd_buf_sg_create(buf, /*iovec*/ 1, 2322 dev->sector_size, &num_sectors, &double_buf_needed); 2323 if (retval != 0) { 2324 no_resources = 1; 2325 goto bailout; 2326 } 2327 } 2328 2329 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2330 dev->num_run_queue--; 2331 2332 data = &buf->buf_type_spec.data; 2333 2334 /* 2335 * pread(2) and pwrite(2) offsets are byte offsets. 2336 */ 2337 io_offset = buf->lba * dev->sector_size; 2338 2339 /* 2340 * Unlock the mutex while we read or write. 2341 */ 2342 pthread_mutex_unlock(&dev->mutex); 2343 2344 /* 2345 * Note that we don't need to double buffer if we're the reader 2346 * because in that case, we have allocated a single buffer of 2347 * sufficient size to do the read. This copy is necessary on 2348 * writes because if one of the components of the S/G list is not 2349 * a sector size multiple, the kernel will reject the write. This 2350 * is unfortunate but not surprising. So this will make sure that 2351 * we're using a single buffer that is a multiple of the sector size. 2352 */ 2353 if ((double_buf_needed != 0) 2354 && (data->sg_count > 1) 2355 && (write_dev != 0)) { 2356 uint32_t cur_offset; 2357 int i; 2358 2359 if (file_dev->tmp_buf == NULL) 2360 file_dev->tmp_buf = calloc(dev->blocksize, 1); 2361 if (file_dev->tmp_buf == NULL) { 2362 buf->status = CAMDD_STATUS_ERROR; 2363 error_count++; 2364 pthread_mutex_lock(&dev->mutex); 2365 goto bailout; 2366 } 2367 for (i = 0, cur_offset = 0; i < data->sg_count; i++) { 2368 bcopy(data->iovec[i].iov_base, 2369 &file_dev->tmp_buf[cur_offset], 2370 data->iovec[i].iov_len); 2371 cur_offset += data->iovec[i].iov_len; 2372 } 2373 db_len = cur_offset; 2374 } 2375 2376 if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) { 2377 if (write_dev == 0) { 2378 /* 2379 * XXX KDM is there any way we would need a S/G 2380 * list here? 
2381 */ 2382 retval = pread(file_dev->fd, data->buf, 2383 buf->len, io_offset); 2384 } else { 2385 if (double_buf_needed != 0) { 2386 retval = pwrite(file_dev->fd, file_dev->tmp_buf, 2387 db_len, io_offset); 2388 } else if (data->sg_count == 0) { 2389 retval = pwrite(file_dev->fd, data->buf, 2390 data->fill_len, io_offset); 2391 } else { 2392 retval = pwritev(file_dev->fd, data->iovec, 2393 data->sg_count, io_offset); 2394 } 2395 } 2396 } else { 2397 if (write_dev == 0) { 2398 /* 2399 * XXX KDM is there any way we would need a S/G 2400 * list here? 2401 */ 2402 retval = read(file_dev->fd, data->buf, buf->len); 2403 } else { 2404 if (double_buf_needed != 0) { 2405 retval = write(file_dev->fd, file_dev->tmp_buf, 2406 db_len); 2407 } else if (data->sg_count == 0) { 2408 retval = write(file_dev->fd, data->buf, 2409 data->fill_len); 2410 } else { 2411 retval = writev(file_dev->fd, data->iovec, 2412 data->sg_count); 2413 } 2414 } 2415 } 2416 2417 /* We're done, re-acquire the lock */ 2418 pthread_mutex_lock(&dev->mutex); 2419 2420 if (retval >= (ssize_t)data->fill_len) { 2421 /* 2422 * If the bytes transferred is more than the request size, 2423 * that indicates an overrun, which should only happen at 2424 * the end of a transfer if we have to round up to a sector 2425 * boundary. 2426 */ 2427 if (buf->status == CAMDD_STATUS_NONE) 2428 buf->status = CAMDD_STATUS_OK; 2429 data->resid = 0; 2430 dev->bytes_transferred += retval; 2431 } else if (retval == -1) { 2432 warn("Error %s %s", (write_dev) ? "writing to" : 2433 "reading from", file_dev->filename); 2434 2435 buf->status = CAMDD_STATUS_ERROR; 2436 data->resid = data->fill_len; 2437 error_count++; 2438 2439 if (dev->debug == 0) 2440 goto bailout; 2441 2442 if ((double_buf_needed != 0) 2443 && (write_dev != 0)) { 2444 fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju " 2445 "offset %ju\n", __func__, file_dev->fd, 2446 file_dev->tmp_buf, db_len, (uintmax_t)buf->lba, 2447 (uintmax_t)io_offset); 2448 } else if (data->sg_count == 0) { 2449 fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju " 2450 "offset %ju\n", __func__, file_dev->fd, data->buf, 2451 data->fill_len, (uintmax_t)buf->lba, 2452 (uintmax_t)io_offset); 2453 } else { 2454 int i; 2455 2456 fprintf(stderr, "%s: fd %d, len %u, lba %ju " 2457 "offset %ju\n", __func__, file_dev->fd, 2458 data->fill_len, (uintmax_t)buf->lba, 2459 (uintmax_t)io_offset); 2460 2461 for (i = 0; i < data->sg_count; i++) { 2462 fprintf(stderr, "index %d ptr %p len %zu\n", 2463 i, data->iovec[i].iov_base, 2464 data->iovec[i].iov_len); 2465 } 2466 } 2467 } else if (retval == 0) { 2468 buf->status = CAMDD_STATUS_EOF; 2469 if (dev->debug != 0) 2470 printf("%s: got EOF from %s!\n", __func__, 2471 file_dev->filename); 2472 data->resid = data->fill_len; 2473 error_count++; 2474 } else if (retval < (ssize_t)data->fill_len) { 2475 if (buf->status == CAMDD_STATUS_NONE) 2476 buf->status = CAMDD_STATUS_SHORT_IO; 2477 data->resid = data->fill_len - retval; 2478 dev->bytes_transferred += retval; 2479 } 2480 2481 bailout: 2482 if (buf != NULL) { 2483 if (buf->status == CAMDD_STATUS_EOF) { 2484 struct camdd_buf *buf2; 2485 dev->flags |= CAMDD_DEV_FLAG_EOF; 2486 STAILQ_FOREACH(buf2, &dev->run_queue, links) 2487 buf2->status = CAMDD_STATUS_EOF; 2488 } 2489 2490 camdd_complete_buf(dev, buf, &error_count); 2491 } 2492 2493 if (error_count != 0) 2494 return (-1); 2495 else if (no_resources != 0) 2496 return (1); 2497 else 2498 return (0); 2499 } 2500 2501 /* 2502 * Execute one command from the run queue. 
Returns 0 for success, 1 for 2503 * stop processing, and -1 for error. 2504 */ 2505 int 2506 camdd_pass_run(struct camdd_dev *dev) 2507 { 2508 struct camdd_buf *buf = NULL; 2509 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 2510 struct camdd_buf_data *data; 2511 uint32_t num_blocks, sectors_used = 0; 2512 union ccb *ccb; 2513 int retval = 0, is_write = dev->write_dev; 2514 int double_buf_needed = 0; 2515 2516 buf = STAILQ_FIRST(&dev->run_queue); 2517 if (buf == NULL) { 2518 retval = 1; 2519 goto bailout; 2520 } 2521 2522 /* 2523 * If we're writing, we need to go through the source buffer list 2524 * and create an S/G list. 2525 */ 2526 if (is_write != 0) { 2527 retval = camdd_buf_sg_create(buf, /*iovec*/ 0, dev->sector_size, 2528 &sectors_used, &double_buf_needed); 2529 if (retval != 0) { 2530 retval = -1; 2531 goto bailout; 2532 } 2533 } 2534 2535 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2536 dev->num_run_queue--; 2537 2538 data = &buf->buf_type_spec.data; 2539 2540 /* 2541 * In almost every case the number of blocks should be the device 2542 * block size. The exception may be at the end of an I/O stream 2543 * for a partial block or at the end of a device. 2544 */ 2545 if (is_write != 0) 2546 num_blocks = sectors_used; 2547 else 2548 num_blocks = data->fill_len / pass_dev->block_len; 2549 2550 ccb = &data->ccb; 2551 2552 switch (pass_dev->protocol) { 2553 case PROTO_SCSI: 2554 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); 2555 2556 scsi_read_write(&ccb->csio, 2557 /*retries*/ dev->retry_count, 2558 /*cbfcnp*/ NULL, 2559 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2560 /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ : 2561 SCSI_RW_WRITE, 2562 /*byte2*/ 0, 2563 /*minimum_cmd_size*/ dev->min_cmd_size, 2564 /*lba*/ buf->lba, 2565 /*block_count*/ num_blocks, 2566 /*data_ptr*/ (data->sg_count != 0) ? 2567 (uint8_t *)data->segs : data->buf, 2568 /*dxfer_len*/ (num_blocks * pass_dev->block_len), 2569 /*sense_len*/ SSD_FULL_SIZE, 2570 /*timeout*/ dev->io_timeout); 2571 2572 if (data->sg_count != 0) { 2573 ccb->csio.sglist_cnt = data->sg_count; 2574 } 2575 break; 2576 default: 2577 retval = -1; 2578 goto bailout; 2579 } 2580 2581 /* Disable freezing the device queue */ 2582 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 2583 2584 if (dev->retry_count != 0) 2585 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 2586 2587 if (data->sg_count != 0) { 2588 ccb->ccb_h.flags |= CAM_DATA_SG; 2589 } 2590 2591 /* 2592 * Store a pointer to the buffer in the CCB. The kernel will 2593 * restore this when we get it back, and we'll use it to identify 2594 * the buffer this CCB came from. 2595 */ 2596 ccb->ccb_h.ccb_buf = buf; 2597 2598 /* 2599 * Unlock our mutex in preparation for issuing the ioctl. 2600 */ 2601 pthread_mutex_unlock(&dev->mutex); 2602 /* 2603 * Queue the CCB to the pass(4) driver.
2604 */ 2605 if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) { 2606 pthread_mutex_lock(&dev->mutex); 2607 2608 warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__, 2609 pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); 2610 warn("%s: CCB address is %p", __func__, ccb); 2611 retval = -1; 2612 2613 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2614 } else { 2615 pthread_mutex_lock(&dev->mutex); 2616 2617 dev->cur_active_io++; 2618 STAILQ_INSERT_TAIL(&dev->active_queue, buf, links); 2619 } 2620 2621 bailout: 2622 return (retval); 2623 } 2624 2625 int 2626 camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len) 2627 { 2628 struct camdd_dev_pass *pass_dev; 2629 uint32_t num_blocks; 2630 int retval = 0; 2631 2632 pass_dev = &dev->dev_spec.pass; 2633 2634 *lba = dev->next_io_pos_bytes / dev->sector_size; 2635 *len = dev->blocksize; 2636 num_blocks = *len / dev->sector_size; 2637 2638 /* 2639 * If max_sector is 0, then we have no set limit. This can happen 2640 * if we're writing to a file in a filesystem, or reading from 2641 * something like /dev/zero. 2642 */ 2643 if ((dev->max_sector != 0) 2644 || (dev->sector_io_limit != 0)) { 2645 uint64_t max_sector; 2646 2647 if ((dev->max_sector != 0) 2648 && (dev->sector_io_limit != 0)) 2649 max_sector = min(dev->sector_io_limit, dev->max_sector); 2650 else if (dev->max_sector != 0) 2651 max_sector = dev->max_sector; 2652 else 2653 max_sector = dev->sector_io_limit; 2654 2655 2656 /* 2657 * Check to see whether we're starting off past the end of 2658 * the device. If so, we need to just send an EOF 2659 * notification to the writer. 2660 */ 2661 if (*lba > max_sector) { 2662 *len = 0; 2663 retval = 1; 2664 } else if (((*lba + num_blocks) > max_sector + 1) 2665 || ((*lba + num_blocks) < *lba)) { 2666 /* 2667 * If we get here (but pass the first check), we 2668 * can trim the request length down to go to the 2669 * end of the device. 2670 */ 2671 num_blocks = (max_sector + 1) - *lba; 2672 *len = num_blocks * dev->sector_size; 2673 retval = 1; 2674 } 2675 } 2676 2677 dev->next_io_pos_bytes += *len; 2678 2679 return (retval); 2680 } 2681 2682 /* 2683 * Returns 0 for success, 1 for EOF detected, and -1 for failure. 2684 */ 2685 int 2686 camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf) 2687 { 2688 struct camdd_buf *buf = NULL; 2689 struct camdd_buf_data *data; 2690 struct camdd_dev_pass *pass_dev; 2691 size_t new_len; 2692 struct camdd_buf_data *rb_data; 2693 int is_write = dev->write_dev; 2694 int eof_flush_needed = 0; 2695 int retval = 0; 2696 int error; 2697 2698 pass_dev = &dev->dev_spec.pass; 2699 2700 /* 2701 * If we've gotten EOF or our partner has, we should not continue 2702 * queueing I/O. If we're a writer, though, we should continue 2703 * to write any buffers that don't have EOF status. 2704 */ 2705 if ((dev->flags & CAMDD_DEV_FLAG_EOF) 2706 || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF) 2707 && (is_write == 0))) { 2708 /* 2709 * Tell the worker thread that we have seen EOF. 2710 */ 2711 retval = 1; 2712 2713 /* 2714 * If we're the writer, send the buffer back with EOF status. 
2715 */ 2716 if (is_write) { 2717 read_buf->status = CAMDD_STATUS_EOF; 2718 2719 error = camdd_complete_peer_buf(dev, read_buf); 2720 } 2721 goto bailout; 2722 } 2723 2724 if (is_write == 0) { 2725 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2726 if (buf == NULL) { 2727 retval = -1; 2728 goto bailout; 2729 } 2730 data = &buf->buf_type_spec.data; 2731 2732 retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len); 2733 if (retval != 0) { 2734 buf->status = CAMDD_STATUS_EOF; 2735 2736 if ((buf->len == 0) 2737 && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT | 2738 CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) { 2739 camdd_release_buf(buf); 2740 goto bailout; 2741 } 2742 dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED; 2743 } 2744 2745 data->fill_len = buf->len; 2746 data->src_start_offset = buf->lba * dev->sector_size; 2747 2748 /* 2749 * Put this on the run queue. 2750 */ 2751 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2752 dev->num_run_queue++; 2753 2754 /* We're done. */ 2755 goto bailout; 2756 } 2757 2758 /* 2759 * Check for new EOF status from the reader. 2760 */ 2761 if ((read_buf->status == CAMDD_STATUS_EOF) 2762 || (read_buf->status == CAMDD_STATUS_ERROR)) { 2763 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2764 if ((STAILQ_FIRST(&dev->pending_queue) == NULL) 2765 && (read_buf->len == 0)) { 2766 camdd_complete_peer_buf(dev, read_buf); 2767 retval = 1; 2768 goto bailout; 2769 } else 2770 eof_flush_needed = 1; 2771 } 2772 2773 /* 2774 * See if we have a buffer we're composing with pieces from our 2775 * partner thread. 2776 */ 2777 buf = STAILQ_FIRST(&dev->pending_queue); 2778 if (buf == NULL) { 2779 uint64_t lba; 2780 ssize_t len; 2781 2782 retval = camdd_get_next_lba_len(dev, &lba, &len); 2783 if (retval != 0) { 2784 read_buf->status = CAMDD_STATUS_EOF; 2785 2786 if (len == 0) { 2787 dev->flags |= CAMDD_DEV_FLAG_EOF; 2788 error = camdd_complete_peer_buf(dev, read_buf); 2789 goto bailout; 2790 } 2791 } 2792 2793 /* 2794 * If we don't have a pending buffer, we need to grab a new 2795 * one from the free list or allocate another one. 2796 */ 2797 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2798 if (buf == NULL) { 2799 retval = 1; 2800 goto bailout; 2801 } 2802 2803 buf->lba = lba; 2804 buf->len = len; 2805 2806 STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links); 2807 dev->num_pending_queue++; 2808 } 2809 2810 data = &buf->buf_type_spec.data; 2811 2812 rb_data = &read_buf->buf_type_spec.data; 2813 2814 if ((rb_data->src_start_offset != dev->next_peer_pos_bytes) 2815 && (dev->debug != 0)) { 2816 printf("%s: WARNING: reader offset %#jx != expected offset " 2817 "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset, 2818 (uintmax_t)dev->next_peer_pos_bytes); 2819 } 2820 dev->next_peer_pos_bytes = rb_data->src_start_offset + 2821 (rb_data->fill_len - rb_data->resid); 2822 2823 new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len; 2824 if (new_len < buf->len) { 2825 /* 2826 * There are three cases here: 2827 * 1. We need more data to fill up a block, so we put 2828 * this I/O on the queue and wait for more I/O. 2829 * 2. We have a pending buffer in the queue that is 2830 * smaller than our blocksize, but we got an EOF. So we 2831 * need to go ahead and flush the write out. 2832 * 3. We got an error. 2833 */ 2834 2835 /* 2836 * Increment our fill length. 2837 */ 2838 data->fill_len += (rb_data->fill_len - rb_data->resid); 2839 2840 /* 2841 * Add the new read buffer to the list for writing. 
*/ 2843 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2844 2845 /* Increment the count */ 2846 buf->src_count++; 2847 2848 if (eof_flush_needed == 0) { 2849 /* 2850 * We need to exit, because we don't have enough 2851 * data yet. 2852 */ 2853 goto bailout; 2854 } else { 2855 /* 2856 * Take the buffer off of the pending queue. 2857 */ 2858 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2859 links); 2860 dev->num_pending_queue--; 2861 2862 /* 2863 * If we need an EOF flush, but there is no data 2864 * to flush, go ahead and return this buffer. 2865 */ 2866 if (data->fill_len == 0) { 2867 camdd_complete_buf(dev, buf, /*error_count*/0); 2868 retval = 1; 2869 goto bailout; 2870 } 2871 2872 /* 2873 * Put this on the next queue for execution. 2874 */ 2875 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2876 dev->num_run_queue++; 2877 } 2878 } else if (new_len == buf->len) { 2879 /* 2880 * We have enough data to completely fill one block, 2881 * so we're ready to issue the I/O. 2882 */ 2883 2884 /* 2885 * Take the buffer off of the pending queue. 2886 */ 2887 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); 2888 dev->num_pending_queue--; 2889 2890 /* 2891 * Add the new read buffer to the list for writing. 2892 */ 2893 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2894 2895 /* Increment the count */ 2896 buf->src_count++; 2897 2898 /* 2899 * Increment our fill length. 2900 */ 2901 data->fill_len += (rb_data->fill_len - rb_data->resid); 2902 2903 /* 2904 * Put this on the next queue for execution. 2905 */ 2906 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2907 dev->num_run_queue++; 2908 } else { 2909 struct camdd_buf *idb; 2910 struct camdd_buf_indirect *indirect; 2911 uint32_t len_to_go, cur_offset; 2912 2913 2914 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2915 if (idb == NULL) { 2916 retval = 1; 2917 goto bailout; 2918 } 2919 indirect = &idb->buf_type_spec.indirect; 2920 indirect->src_buf = read_buf; 2921 read_buf->refcount++; 2922 indirect->offset = 0; 2923 indirect->start_ptr = rb_data->buf; 2924 /* 2925 * We've already established that there is more 2926 * data in read_buf than we have room for in our 2927 * current write request. So this particular chunk 2928 * of the request should just be the remainder 2929 * needed to fill up a block. 2930 */ 2931 indirect->len = buf->len - (data->fill_len - data->resid); 2932 2933 camdd_buf_add_child(buf, idb); 2934 2935 /* 2936 * This buffer is ready to execute, so we can take 2937 * it off the pending queue and put it on the run 2938 * queue. 2939 */ 2940 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2941 links); 2942 dev->num_pending_queue--; 2943 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2944 dev->num_run_queue++; 2945 2946 cur_offset = indirect->offset + indirect->len; 2947 2948 /* 2949 * The resulting I/O would be too large to fit in 2950 * one block. We need to split this I/O into 2951 * multiple pieces. Allocate as many buffers as needed. 2952 */ 2953 for (len_to_go = rb_data->fill_len - rb_data->resid - 2954 indirect->len; len_to_go > 0;) { 2955 struct camdd_buf *new_buf; 2956 struct camdd_buf_data *new_data; 2957 uint64_t lba; 2958 ssize_t len; 2959 2960 retval = camdd_get_next_lba_len(dev, &lba, &len); 2961 if ((retval != 0) 2962 && (len == 0)) { 2963 /* 2964 * The device has already been marked 2965 * as EOF, and there is no space left.
2966 */ 2967 goto bailout; 2968 } 2969 2970 new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2971 if (new_buf == NULL) { 2972 retval = 1; 2973 goto bailout; 2974 } 2975 2976 new_buf->lba = lba; 2977 new_buf->len = len; 2978 2979 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2980 if (idb == NULL) { 2981 retval = 1; 2982 goto bailout; 2983 } 2984 2985 indirect = &idb->buf_type_spec.indirect; 2986 2987 indirect->src_buf = read_buf; 2988 read_buf->refcount++; 2989 indirect->offset = cur_offset; 2990 indirect->start_ptr = rb_data->buf + cur_offset; 2991 indirect->len = min(len_to_go, new_buf->len); 2992 #if 0 2993 if (((indirect->len % dev->sector_size) != 0) 2994 || ((indirect->offset % dev->sector_size) != 0)) { 2995 warnx("offset %ju len %ju not aligned with " 2996 "sector size %u", indirect->offset, 2997 (uintmax_t)indirect->len, dev->sector_size); 2998 } 2999 #endif 3000 cur_offset += indirect->len; 3001 len_to_go -= indirect->len; 3002 3003 camdd_buf_add_child(new_buf, idb); 3004 3005 new_data = &new_buf->buf_type_spec.data; 3006 3007 if ((new_data->fill_len == new_buf->len) 3008 || (eof_flush_needed != 0)) { 3009 STAILQ_INSERT_TAIL(&dev->run_queue, 3010 new_buf, links); 3011 dev->num_run_queue++; 3012 } else if (new_data->fill_len < buf->len) { 3013 STAILQ_INSERT_TAIL(&dev->pending_queue, 3014 new_buf, links); 3015 dev->num_pending_queue++; 3016 } else { 3017 warnx("%s: too much data in new " 3018 "buffer!", __func__); 3019 retval = 1; 3020 goto bailout; 3021 } 3022 } 3023 } 3024 3025 bailout: 3026 return (retval); 3027 } 3028 3029 void 3030 camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 3031 uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes) 3032 { 3033 *our_depth = dev->cur_active_io + dev->num_run_queue; 3034 if (dev->num_peer_work_queue > 3035 dev->num_peer_done_queue) 3036 *peer_depth = dev->num_peer_work_queue - 3037 dev->num_peer_done_queue; 3038 else 3039 *peer_depth = 0; 3040 *our_bytes = *our_depth * dev->blocksize; 3041 *peer_bytes = dev->peer_bytes_queued; 3042 } 3043 3044 void 3045 camdd_sig_handler(int sig) 3046 { 3047 if (sig == SIGINFO) 3048 need_status = 1; 3049 else { 3050 need_exit = 1; 3051 error_exit = 1; 3052 } 3053 3054 sem_post(&camdd_sem); 3055 } 3056 3057 void 3058 camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, 3059 struct timespec *start_time) 3060 { 3061 struct timespec done_time; 3062 uint64_t total_ns; 3063 long double mb_sec, total_sec; 3064 int error = 0; 3065 3066 error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time); 3067 if (error != 0) { 3068 warn("Unable to get done time"); 3069 return; 3070 } 3071 3072 timespecsub(&done_time, start_time); 3073 3074 total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000); 3075 total_sec = total_ns; 3076 total_sec /= 1000000000; 3077 3078 fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n" 3079 "%.4Lf seconds elapsed\n", 3080 (uintmax_t)camdd_dev->bytes_transferred, 3081 (camdd_dev->write_dev == 0) ? "read from" : "written to", 3082 camdd_dev->device_name, 3083 (uintmax_t)other_dev->bytes_transferred, 3084 (other_dev->write_dev == 0) ? 
"read from" : "written to", 3085 other_dev->device_name, total_sec); 3086 3087 mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred); 3088 mb_sec /= 1024 * 1024; 3089 mb_sec *= 1000000000; 3090 mb_sec /= total_ns; 3091 fprintf(stderr, "%.2Lf MB/sec\n", mb_sec); 3092 } 3093 3094 int 3095 camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, 3096 int retry_count, int timeout) 3097 { 3098 struct cam_device *new_cam_dev = NULL; 3099 struct camdd_dev *devs[2]; 3100 struct timespec start_time; 3101 pthread_t threads[2]; 3102 int unit = 0; 3103 int error = 0; 3104 int i; 3105 3106 if (num_io_opts != 2) { 3107 warnx("Must have one input and one output path"); 3108 error = 1; 3109 goto bailout; 3110 } 3111 3112 bzero(devs, sizeof(devs)); 3113 3114 for (i = 0; i < num_io_opts; i++) { 3115 switch (io_opts[i].dev_type) { 3116 case CAMDD_DEV_PASS: { 3117 if (isdigit(io_opts[i].dev_name[0])) { 3118 camdd_argmask new_arglist = CAMDD_ARG_NONE; 3119 int bus = 0, target = 0, lun = 0; 3120 int rv; 3121 3122 /* device specified as bus:target[:lun] */ 3123 rv = parse_btl(io_opts[i].dev_name, &bus, 3124 &target, &lun, &new_arglist); 3125 if (rv < 2) { 3126 warnx("numeric device specification " 3127 "must be either bus:target, or " 3128 "bus:target:lun"); 3129 error = 1; 3130 goto bailout; 3131 } 3132 /* default to 0 if lun was not specified */ 3133 if ((new_arglist & CAMDD_ARG_LUN) == 0) { 3134 lun = 0; 3135 new_arglist |= CAMDD_ARG_LUN; 3136 } 3137 new_cam_dev = cam_open_btl(bus, target, lun, 3138 O_RDWR, NULL); 3139 } else { 3140 char name[30]; 3141 3142 if (cam_get_device(io_opts[i].dev_name, name, 3143 sizeof name, &unit) == -1) { 3144 warnx("%s", cam_errbuf); 3145 error = 1; 3146 goto bailout; 3147 } 3148 new_cam_dev = cam_open_spec_device(name, unit, 3149 O_RDWR, NULL); 3150 } 3151 3152 if (new_cam_dev == NULL) { 3153 warnx("%s", cam_errbuf); 3154 error = 1; 3155 goto bailout; 3156 } 3157 3158 devs[i] = camdd_probe_pass(new_cam_dev, 3159 /*io_opts*/ &io_opts[i], 3160 CAMDD_ARG_ERR_RECOVER, 3161 /*probe_retry_count*/ 3, 3162 /*probe_timeout*/ 5000, 3163 /*io_retry_count*/ retry_count, 3164 /*io_timeout*/ timeout); 3165 if (devs[i] == NULL) { 3166 warn("Unable to probe device %s%u", 3167 new_cam_dev->device_name, 3168 new_cam_dev->dev_unit_num); 3169 error = 1; 3170 goto bailout; 3171 } 3172 break; 3173 } 3174 case CAMDD_DEV_FILE: { 3175 int fd = -1; 3176 3177 if (io_opts[i].dev_name[0] == '-') { 3178 if (io_opts[i].write_dev != 0) 3179 fd = STDOUT_FILENO; 3180 else 3181 fd = STDIN_FILENO; 3182 } else { 3183 if (io_opts[i].write_dev != 0) { 3184 fd = open(io_opts[i].dev_name, 3185 O_RDWR | O_CREAT, S_IWUSR |S_IRUSR); 3186 } else { 3187 fd = open(io_opts[i].dev_name, 3188 O_RDONLY); 3189 } 3190 } 3191 if (fd == -1) { 3192 warn("error opening file %s", 3193 io_opts[i].dev_name); 3194 error = 1; 3195 goto bailout; 3196 } 3197 3198 devs[i] = camdd_probe_file(fd, &io_opts[i], 3199 retry_count, timeout); 3200 if (devs[i] == NULL) { 3201 error = 1; 3202 goto bailout; 3203 } 3204 3205 break; 3206 } 3207 default: 3208 warnx("Unknown device type %d (%s)", 3209 io_opts[i].dev_type, io_opts[i].dev_name); 3210 error = 1; 3211 goto bailout; 3212 break; /*NOTREACHED */ 3213 } 3214 3215 devs[i]->write_dev = io_opts[i].write_dev; 3216 3217 devs[i]->start_offset_bytes = io_opts[i].offset; 3218 3219 if (max_io != 0) { 3220 devs[i]->sector_io_limit = 3221 (devs[i]->start_offset_bytes / 3222 devs[i]->sector_size) + 3223 (max_io / devs[i]->sector_size) - 1; 3224 } 3225 3226 
devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes; 3227 devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes; 3228 } 3229 3230 devs[0]->peer_dev = devs[1]; 3231 devs[1]->peer_dev = devs[0]; 3232 devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes; 3233 devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes; 3234 3235 sem_init(&camdd_sem, /*pshared*/ 0, 0); 3236 3237 signal(SIGINFO, camdd_sig_handler); 3238 signal(SIGINT, camdd_sig_handler); 3239 3240 error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time); 3241 if (error != 0) { 3242 warn("Unable to get start time"); 3243 goto bailout; 3244 } 3245 3246 for (i = 0; i < num_io_opts; i++) { 3247 error = pthread_create(&threads[i], NULL, camdd_worker, 3248 (void *)devs[i]); 3249 if (error != 0) { 3250 warnc(error, "pthread_create() failed"); 3251 goto bailout; 3252 } 3253 } 3254 3255 for (;;) { 3256 if ((sem_wait(&camdd_sem) == -1) 3257 || (need_exit != 0)) { 3258 struct kevent ke; 3259 3260 for (i = 0; i < num_io_opts; i++) { 3261 EV_SET(&ke, (uintptr_t)&devs[i]->work_queue, 3262 EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); 3263 3264 devs[i]->flags |= CAMDD_DEV_FLAG_EOF; 3265 3266 error = kevent(devs[i]->kq, &ke, 1, NULL, 0, 3267 NULL); 3268 if (error == -1) 3269 warn("%s: unable to wake up thread", 3270 __func__); 3271 error = 0; 3272 } 3273 break; 3274 } else if (need_status != 0) { 3275 camdd_print_status(devs[0], devs[1], &start_time); 3276 need_status = 0; 3277 } 3278 } 3279 for (i = 0; i < num_io_opts; i++) { 3280 pthread_join(threads[i], NULL); 3281 } 3282 3283 camdd_print_status(devs[0], devs[1], &start_time); 3284 3285 bailout: 3286 3287 for (i = 0; i < num_io_opts; i++) 3288 camdd_free_dev(devs[i]); 3289 3290 return (error + error_exit); 3291 } 3292 3293 void 3294 usage(void) 3295 { 3296 fprintf(stderr, 3297 "usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n" 3298 " <-i|-o file=/tmp/file,bs=512K,offset=1M>\n" 3299 " <-i|-o file=/dev/da0,bs=512K,offset=1M>\n" 3300 " <-i|-o file=/dev/nsa0,bs=512K>\n" 3301 " [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n" 3302 "Option description\n" 3303 "-i <arg=val> Specify input device/file and parameters\n" 3304 "-o <arg=val> Specify output device/file and parameters\n" 3305 "Input and Output parameters\n" 3306 "pass=name Specify a pass(4) device like pass0 or /dev/pass0\n" 3307 "file=name Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n" 3308 " or - for stdin/stdout\n" 3309 "bs=blocksize Specify blocksize in bytes, or using K, M, G, etc. suffix\n" 3310 "offset=len Specify starting offset in bytes or using K, M, G suffix\n" 3311 " NOTE: offset cannot be specified on tapes, pipes, stdin/out\n" 3312 "depth=N Specify a numeric queue depth. This only applies to pass(4)\n" 3313 "mcs=N Specify a minimum cmd size for pass(4) read/write commands\n" 3314 "Optional arguments\n" 3315 "-C retry_cnt Specify a retry count for pass(4) devices\n" 3316 "-E Enable CAM error recovery for pass(4) devices\n" 3317 "-m max_io Specify the maximum amount to be transferred in bytes or\n" 3318 " using K, G, M, etc. 
suffixes\n" 3319 "-t timeout Specify the I/O timeout to use with pass(4) devices\n" 3320 "-v Enable verbose error recovery\n" 3321 "-h Print this message\n"); 3322 } 3323 3324 3325 int 3326 camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts) 3327 { 3328 char *tmpstr, *tmpstr2; 3329 char *orig_tmpstr = NULL; 3330 int retval = 0; 3331 3332 io_opts->write_dev = is_write; 3333 3334 tmpstr = strdup(args); 3335 if (tmpstr == NULL) { 3336 warn("strdup failed"); 3337 retval = 1; 3338 goto bailout; 3339 } 3340 orig_tmpstr = tmpstr; 3341 while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) { 3342 char *name, *value; 3343 3344 /* 3345 * If the user creates an empty parameter by putting in two 3346 * commas, skip over it and look for the next field. 3347 */ 3348 if (*tmpstr2 == '\0') 3349 continue; 3350 3351 name = strsep(&tmpstr2, "="); 3352 if (*name == '\0') { 3353 warnx("Got empty I/O parameter name"); 3354 retval = 1; 3355 goto bailout; 3356 } 3357 value = strsep(&tmpstr2, "="); 3358 if ((value == NULL) 3359 || (*value == '\0')) { 3360 warnx("Empty I/O parameter value for %s", name); 3361 retval = 1; 3362 goto bailout; 3363 } 3364 if (strncasecmp(name, "file", 4) == 0) { 3365 io_opts->dev_type = CAMDD_DEV_FILE; 3366 io_opts->dev_name = strdup(value); 3367 if (io_opts->dev_name == NULL) { 3368 warn("Error allocating memory"); 3369 retval = 1; 3370 goto bailout; 3371 } 3372 } else if (strncasecmp(name, "pass", 4) == 0) { 3373 io_opts->dev_type = CAMDD_DEV_PASS; 3374 io_opts->dev_name = strdup(value); 3375 if (io_opts->dev_name == NULL) { 3376 warn("Error allocating memory"); 3377 retval = 1; 3378 goto bailout; 3379 } 3380 } else if ((strncasecmp(name, "bs", 2) == 0) 3381 || (strncasecmp(name, "blocksize", 9) == 0)) { 3382 retval = expand_number(value, &io_opts->blocksize); 3383 if (retval == -1) { 3384 warn("expand_number(3) failed on %s=%s", name, 3385 value); 3386 retval = 1; 3387 goto bailout; 3388 } 3389 } else if (strncasecmp(name, "depth", 5) == 0) { 3390 char *endptr; 3391 3392 io_opts->queue_depth = strtoull(value, &endptr, 0); 3393 if (*endptr != '\0') { 3394 warnx("invalid queue depth %s", value); 3395 retval = 1; 3396 goto bailout; 3397 } 3398 } else if (strncasecmp(name, "mcs", 3) == 0) { 3399 char *endptr; 3400 3401 io_opts->min_cmd_size = strtol(value, &endptr, 0); 3402 if ((*endptr != '\0') 3403 || ((io_opts->min_cmd_size > 16) 3404 || (io_opts->min_cmd_size < 0))) { 3405 warnx("invalid minimum cmd size %s", value); 3406 retval = 1; 3407 goto bailout; 3408 } 3409 } else if (strncasecmp(name, "offset", 6) == 0) { 3410 retval = expand_number(value, &io_opts->offset); 3411 if (retval == -1) { 3412 warn("expand_number(3) failed on %s=%s", name, 3413 value); 3414 retval = 1; 3415 goto bailout; 3416 } 3417 } else if (strncasecmp(name, "debug", 5) == 0) { 3418 char *endptr; 3419 3420 io_opts->debug = strtoull(value, &endptr, 0); 3421 if (*endptr != '\0') { 3422 warnx("invalid debug level %s", value); 3423 retval = 1; 3424 goto bailout; 3425 } 3426 } else { 3427 warnx("Unrecognized parameter %s=%s", name, value); 3428 } 3429 } 3430 bailout: 3431 free(orig_tmpstr); 3432 3433 return (retval); 3434 } 3435 3436 int 3437 main(int argc, char **argv) 3438 { 3439 int c; 3440 camdd_argmask arglist = CAMDD_ARG_NONE; 3441 int timeout = 0, retry_count = 1; 3442 int error = 0; 3443 uint64_t max_io = 0; 3444 struct camdd_io_opts *opt_list = NULL; 3445 3446 if (argc == 1) { 3447 usage(); 3448 exit(1); 3449 } 3450 3451 opt_list = calloc(2, sizeof(struct camdd_io_opts)); 3452 if 
(opt_list == NULL) { 3453 warn("Unable to allocate option list"); 3454 error = 1; 3455 goto bailout; 3456 } 3457 3458 while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){ 3459 switch (c) { 3460 case 'C': 3461 retry_count = strtol(optarg, NULL, 0); 3462 if (retry_count < 0) 3463 errx(1, "retry count %d is < 0", 3464 retry_count); 3465 arglist |= CAMDD_ARG_RETRIES; 3466 break; 3467 case 'E': 3468 arglist |= CAMDD_ARG_ERR_RECOVER; 3469 break; 3470 case 'i': 3471 case 'o': 3472 if (((c == 'i') 3473 && (opt_list[0].dev_type != CAMDD_DEV_NONE)) 3474 || ((c == 'o') 3475 && (opt_list[1].dev_type != CAMDD_DEV_NONE))) { 3476 errx(1, "Only one input and output path " 3477 "allowed"); 3478 } 3479 error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0, 3480 (c == 'o') ? &opt_list[1] : &opt_list[0]); 3481 if (error != 0) 3482 goto bailout; 3483 break; 3484 case 'm': 3485 error = expand_number(optarg, &max_io); 3486 if (error == -1) { 3487 warn("invalid maximum I/O amount %s", optarg); 3488 error = 1; 3489 goto bailout; 3490 } 3491 break; 3492 case 't': 3493 timeout = strtol(optarg, NULL, 0); 3494 if (timeout < 0) 3495 errx(1, "invalid timeout %d", timeout); 3496 /* Convert the timeout from seconds to ms */ 3497 timeout *= 1000; 3498 arglist |= CAMDD_ARG_TIMEOUT; 3499 break; 3500 case 'v': 3501 arglist |= CAMDD_ARG_VERBOSE; 3502 break; 3503 case 'h': 3504 default: 3505 usage(); 3506 exit(1); 3507 break; /*NOTREACHED*/ 3508 } 3509 } 3510 3511 if ((opt_list[0].dev_type == CAMDD_DEV_NONE) 3512 || (opt_list[1].dev_type == CAMDD_DEV_NONE)) 3513 errx(1, "Must specify both -i and -o"); 3514 3515 /* 3516 * Set the timeout if the user hasn't specified one. 3517 */ 3518 if (timeout == 0) 3519 timeout = CAMDD_PASS_RW_TIMEOUT; 3520 3521 error = camdd_rw(opt_list, 2, max_io, retry_count, timeout); 3522 3523 bailout: 3524 free(opt_list); 3525 3526 exit(error); 3527 } 3528
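
/*
 * The reader and writer threads above wake each other with EVFILT_USER
 * kevents (see camdd_queue_peer_buf() and camdd_complete_peer_buf())
 * rather than condition variables.  The block below is a minimal,
 * self-contained sketch of that register/trigger/wait pattern, kept
 * under #if 0 so it is illustration only: the ident value, function
 * name, and error handling are simplifying assumptions, not part of
 * camdd itself.
 */
#if 0
static int
example_user_event(void)
{
	struct kevent ke;
	int kq;

	if ((kq = kqueue()) == -1)
		return (-1);

	/* Register the user event once; the ident is an arbitrary cookie. */
	EV_SET(&ke, /*ident*/ 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &ke, 1, NULL, 0, NULL) == -1)
		return (-1);

	/* A peer thread fires the event like this to wake the owner. */
	EV_SET(&ke, /*ident*/ 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	if (kevent(kq, &ke, 1, NULL, 0, NULL) == -1)
		return (-1);

	/* The owner of the kqueue blocks here until the trigger arrives. */
	if (kevent(kq, NULL, 0, &ke, 1, NULL) == -1)
		return (-1);

	close(kq);
	return (0);
}
#endif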
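
/*
 * camdd_pass_run() and camdd_pass_fetch() split each SCSI READ/WRITE
 * into an asynchronous submit (CAMIOQUEUE) and a later completion
 * fetch (CAMIOGET).  The sketch below shows one round trip through the
 * same two ioctls for a single READ.  It is a hedged illustration under
 * stated assumptions: "cgd" was opened elsewhere with cam_open_device(),
 * "block_len" matches the device's logical block size, there is no S/G
 * list, and the kqueue integration, retries, and the per-buffer pointer
 * camdd stashes in the CCB are all omitted.
 */
#if 0
static int
example_pass_read(struct cam_device *cgd, uint64_t lba, uint32_t num_blocks,
		  uint32_t block_len, uint8_t *data_buf)
{
	union ccb ccb;

	bzero(&ccb, sizeof(ccb));
	ccb.ccb_h.path_id = cgd->path_id;
	ccb.ccb_h.target_id = cgd->target_id;
	ccb.ccb_h.target_lun = cgd->target_lun;

	scsi_read_write(&ccb.csio,
			/*retries*/ 0,
			/*cbfcnp*/ NULL,
			/*tag_action*/ MSG_SIMPLE_Q_TAG,
			/*readop*/ SCSI_RW_READ,
			/*byte2*/ 0,
			/*minimum_cmd_size*/ 0,
			/*lba*/ lba,
			/*block_count*/ num_blocks,
			/*data_ptr*/ data_buf,
			/*dxfer_len*/ num_blocks * block_len,
			/*sense_len*/ SSD_FULL_SIZE,
			/*timeout*/ CAMDD_PASS_RW_TIMEOUT);

	/* Don't freeze the device queue on errors. */
	ccb.ccb_h.flags |= CAM_DEV_QFRZDIS;

	/* Submit the CCB; the ioctl returns before the I/O completes. */
	if (ioctl(cgd->fd, CAMIOQUEUE, &ccb) == -1)
		return (-1);

	/* Collect a completed CCB.  The real code waits on a kqueue first. */
	if (ioctl(cgd->fd, CAMIOGET, &ccb) == -1)
		return (-1);

	if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		return (-1);

	return (0);
}
#endif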
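
/*
 * Worked example for camdd_get_next_lba_len() (numbers invented for
 * illustration): with sector_size = 512, blocksize = 65536 (128
 * sectors) and max_sector = 999, a request starting at LBA 960 would
 * run past the end of the device, so the length is trimmed to
 * (999 + 1 - 960) * 512 = 20480 bytes and the function returns 1 so
 * the caller marks the buffer EOF.  If the starting LBA were already
 * past max_sector, the length would be clamped to 0 and only the EOF
 * indication would be passed along.
 */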
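
/*
 * Worked example for the write-side block assembly in camdd_queue(),
 * assuming no residuals (numbers invented for illustration): with a
 * writer blocksize of 64 KB, suppose the pending write buffer already
 * holds 48 KB (data->fill_len = 49152) and the reader hands over a
 * 24 KB chunk.  new_len = 73728 > 65536, so an indirect buffer
 * describing the first 16384 bytes of the read buffer completes the
 * pending block, which moves to the run queue; the remaining 8192
 * bytes are attached through a second indirect buffer to a freshly
 * allocated write buffer that goes on the pending queue to wait for
 * more data.
 */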
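
/*
 * Worked example for the throughput math in camdd_print_status()
 * (figures invented): the smaller of the two devices' byte counts is
 * converted to MiB and scaled by elapsed time, i.e.
 * mb_sec = bytes / (1024 * 1024) * 1000000000 / total_ns.  Moving
 * 1 GiB in 2.0 seconds gives 1073741824 / 1048576 * 1000000000 /
 * 2000000000 = 512 MB/sec.
 */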
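
/*
 * camdd_parse_io_opts() walks a comma-separated list of name=value
 * pairs with strsep(3), as produced by an invocation such as
 * "camdd -i pass=pass0,bs=1M,depth=4 -o file=/tmp/out,bs=1M".  The
 * block below is a stand-alone sketch of that tokenizing idiom on an
 * invented example string; it is not the camdd parser itself and skips
 * the per-option validation and expand_number(3) suffix handling done
 * above.
 */
#if 0
static void
example_parse_opts(void)
{
	char args[] = "file=/dev/da0,bs=512K,offset=1M";
	char *cp = args, *field, *name, *value;

	while ((field = strsep(&cp, ",")) != NULL) {
		/* Tolerate empty fields created by ",,". */
		if (*field == '\0')
			continue;
		name = strsep(&field, "=");
		value = strsep(&field, "=");
		if (*name == '\0') {
			warnx("empty parameter name");
			continue;
		}
		if ((value == NULL) || (*value == '\0')) {
			warnx("missing value for %s", name);
			continue;
		}
		printf("option %s = %s\n", name, value);
	}
}
#endif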