1 /*- 2 * Copyright (c) 1997-2007 Kenneth D. Merry 3 * Copyright (c) 2013, 2014, 2015 Spectra Logic Corporation 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions, and the following disclaimer, 11 * without modification. 12 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 13 * substantially similar to the "NO WARRANTY" disclaimer below 14 * ("Disclaimer") and any redistribution must be conditioned upon 15 * including a substantially similar Disclaimer requirement for further 16 * binary redistribution. 17 * 18 * NO WARRANTY 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 27 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 28 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGES. 30 * 31 * Authors: Ken Merry (Spectra Logic Corporation) 32 */ 33 34 /* 35 * This is eventually intended to be: 36 * - A basic data transfer/copy utility 37 * - A simple benchmark utility 38 * - An example of how to use the asynchronous pass(4) driver interface. 
39 */ 40 #include <sys/cdefs.h> 41 __FBSDID("$FreeBSD$"); 42 43 #include <sys/ioctl.h> 44 #include <sys/stdint.h> 45 #include <sys/types.h> 46 #include <sys/endian.h> 47 #include <sys/param.h> 48 #include <sys/sbuf.h> 49 #include <sys/stat.h> 50 #include <sys/event.h> 51 #include <sys/time.h> 52 #include <sys/uio.h> 53 #include <vm/vm.h> 54 #include <machine/bus.h> 55 #include <sys/bus.h> 56 #include <sys/bus_dma.h> 57 #include <sys/mtio.h> 58 #include <sys/conf.h> 59 #include <sys/disk.h> 60 61 #include <stdio.h> 62 #include <stdlib.h> 63 #include <semaphore.h> 64 #include <string.h> 65 #include <unistd.h> 66 #include <inttypes.h> 67 #include <limits.h> 68 #include <fcntl.h> 69 #include <ctype.h> 70 #include <err.h> 71 #include <libutil.h> 72 #include <pthread.h> 73 #include <assert.h> 74 #include <bsdxml.h> 75 76 #include <cam/cam.h> 77 #include <cam/cam_debug.h> 78 #include <cam/cam_ccb.h> 79 #include <cam/scsi/scsi_all.h> 80 #include <cam/scsi/scsi_da.h> 81 #include <cam/scsi/scsi_pass.h> 82 #include <cam/scsi/scsi_message.h> 83 #include <cam/scsi/smp_all.h> 84 #include <camlib.h> 85 #include <mtlib.h> 86 #include <zlib.h> 87 88 typedef enum { 89 CAMDD_CMD_NONE = 0x00000000, 90 CAMDD_CMD_HELP = 0x00000001, 91 CAMDD_CMD_WRITE = 0x00000002, 92 CAMDD_CMD_READ = 0x00000003 93 } camdd_cmdmask; 94 95 typedef enum { 96 CAMDD_ARG_NONE = 0x00000000, 97 CAMDD_ARG_VERBOSE = 0x00000001, 98 CAMDD_ARG_DEVICE = 0x00000002, 99 CAMDD_ARG_BUS = 0x00000004, 100 CAMDD_ARG_TARGET = 0x00000008, 101 CAMDD_ARG_LUN = 0x00000010, 102 CAMDD_ARG_UNIT = 0x00000020, 103 CAMDD_ARG_TIMEOUT = 0x00000040, 104 CAMDD_ARG_ERR_RECOVER = 0x00000080, 105 CAMDD_ARG_RETRIES = 0x00000100 106 } camdd_argmask; 107 108 typedef enum { 109 CAMDD_DEV_NONE = 0x00, 110 CAMDD_DEV_PASS = 0x01, 111 CAMDD_DEV_FILE = 0x02 112 } camdd_dev_type; 113 114 struct camdd_io_opts { 115 camdd_dev_type dev_type; 116 char *dev_name; 117 uint64_t blocksize; 118 uint64_t queue_depth; 119 uint64_t offset; 120 int min_cmd_size; 121 int write_dev; 122 uint64_t debug; 123 }; 124 125 typedef enum { 126 CAMDD_BUF_NONE, 127 CAMDD_BUF_DATA, 128 CAMDD_BUF_INDIRECT 129 } camdd_buf_type; 130 131 struct camdd_buf_indirect { 132 /* 133 * Pointer to the source buffer. 134 */ 135 struct camdd_buf *src_buf; 136 137 /* 138 * Offset into the source buffer, in bytes. 139 */ 140 uint64_t offset; 141 /* 142 * Pointer to the starting point in the source buffer. 143 */ 144 uint8_t *start_ptr; 145 146 /* 147 * Length of this chunk in bytes. 148 */ 149 size_t len; 150 }; 151 152 struct camdd_buf_data { 153 /* 154 * Buffer allocated when we allocate this camdd_buf. This should 155 * be the size of the blocksize for this device. 156 */ 157 uint8_t *buf; 158 159 /* 160 * The amount of backing store allocated in buf. Generally this 161 * will be the blocksize of the device. 162 */ 163 uint32_t alloc_len; 164 165 /* 166 * The amount of data that was put into the buffer (on reads) or 167 * the amount of data we have put onto the src_list so far (on 168 * writes). 169 */ 170 uint32_t fill_len; 171 172 /* 173 * The amount of data that was not transferred. 174 */ 175 uint32_t resid; 176 177 /* 178 * Starting byte offset on the reader. 179 */ 180 uint64_t src_start_offset; 181 182 /* 183 * CCB used for pass(4) device targets. 184 */ 185 union ccb ccb; 186 187 /* 188 * Number of scatter/gather segments. 189 */ 190 int sg_count; 191 192 /* 193 * Set if we had to tack on an extra buffer to round the transfer 194 * up to a sector size. 
195 */ 196 int extra_buf; 197 198 /* 199 * Scatter/gather list used generally when we're the writer for a 200 * pass(4) device. 201 */ 202 bus_dma_segment_t *segs; 203 204 /* 205 * Scatter/gather list used generally when we're the writer for a 206 * file or block device; 207 */ 208 struct iovec *iovec; 209 }; 210 211 union camdd_buf_types { 212 struct camdd_buf_indirect indirect; 213 struct camdd_buf_data data; 214 }; 215 216 typedef enum { 217 CAMDD_STATUS_NONE, 218 CAMDD_STATUS_OK, 219 CAMDD_STATUS_SHORT_IO, 220 CAMDD_STATUS_EOF, 221 CAMDD_STATUS_ERROR 222 } camdd_buf_status; 223 224 struct camdd_buf { 225 camdd_buf_type buf_type; 226 union camdd_buf_types buf_type_spec; 227 228 camdd_buf_status status; 229 230 uint64_t lba; 231 size_t len; 232 233 /* 234 * A reference count of how many indirect buffers point to this 235 * buffer. 236 */ 237 int refcount; 238 239 /* 240 * A link back to our parent device. 241 */ 242 struct camdd_dev *dev; 243 STAILQ_ENTRY(camdd_buf) links; 244 STAILQ_ENTRY(camdd_buf) work_links; 245 246 /* 247 * A count of the buffers on the src_list. 248 */ 249 int src_count; 250 251 /* 252 * List of buffers from our partner thread that are the components 253 * of this buffer for the I/O. Uses src_links. 254 */ 255 STAILQ_HEAD(,camdd_buf) src_list; 256 STAILQ_ENTRY(camdd_buf) src_links; 257 }; 258 259 #define NUM_DEV_TYPES 2 260 261 struct camdd_dev_pass { 262 int scsi_dev_type; 263 struct cam_device *dev; 264 uint64_t max_sector; 265 uint32_t block_len; 266 uint32_t cpi_maxio; 267 }; 268 269 typedef enum { 270 CAMDD_FILE_NONE, 271 CAMDD_FILE_REG, 272 CAMDD_FILE_STD, 273 CAMDD_FILE_PIPE, 274 CAMDD_FILE_DISK, 275 CAMDD_FILE_TAPE, 276 CAMDD_FILE_TTY, 277 CAMDD_FILE_MEM 278 } camdd_file_type; 279 280 typedef enum { 281 CAMDD_FF_NONE = 0x00, 282 CAMDD_FF_CAN_SEEK = 0x01 283 } camdd_file_flags; 284 285 struct camdd_dev_file { 286 int fd; 287 struct stat sb; 288 char filename[MAXPATHLEN + 1]; 289 camdd_file_type file_type; 290 camdd_file_flags file_flags; 291 uint8_t *tmp_buf; 292 }; 293 294 struct camdd_dev_block { 295 int fd; 296 uint64_t size_bytes; 297 uint32_t block_len; 298 }; 299 300 union camdd_dev_spec { 301 struct camdd_dev_pass pass; 302 struct camdd_dev_file file; 303 struct camdd_dev_block block; 304 }; 305 306 typedef enum { 307 CAMDD_DEV_FLAG_NONE = 0x00, 308 CAMDD_DEV_FLAG_EOF = 0x01, 309 CAMDD_DEV_FLAG_PEER_EOF = 0x02, 310 CAMDD_DEV_FLAG_ACTIVE = 0x04, 311 CAMDD_DEV_FLAG_EOF_SENT = 0x08, 312 CAMDD_DEV_FLAG_EOF_QUEUED = 0x10 313 } camdd_dev_flags; 314 315 struct camdd_dev { 316 camdd_dev_type dev_type; 317 union camdd_dev_spec dev_spec; 318 camdd_dev_flags flags; 319 char device_name[MAXPATHLEN+1]; 320 uint32_t blocksize; 321 uint32_t sector_size; 322 uint64_t max_sector; 323 uint64_t sector_io_limit; 324 int min_cmd_size; 325 int write_dev; 326 int retry_count; 327 int io_timeout; 328 int debug; 329 uint64_t start_offset_bytes; 330 uint64_t next_io_pos_bytes; 331 uint64_t next_peer_pos_bytes; 332 uint64_t next_completion_pos_bytes; 333 uint64_t peer_bytes_queued; 334 uint64_t bytes_transferred; 335 uint32_t target_queue_depth; 336 uint32_t cur_active_io; 337 uint8_t *extra_buf; 338 uint32_t extra_buf_len; 339 struct camdd_dev *peer_dev; 340 pthread_mutex_t mutex; 341 pthread_cond_t cond; 342 int kq; 343 344 int (*run)(struct camdd_dev *dev); 345 int (*fetch)(struct camdd_dev *dev); 346 347 /* 348 * Buffers that are available for I/O. Uses links. 349 */ 350 STAILQ_HEAD(,camdd_buf) free_queue; 351 352 /* 353 * Free indirect buffers. 
These are used for breaking a large 354 * buffer into multiple pieces. 355 */ 356 STAILQ_HEAD(,camdd_buf) free_indirect_queue; 357 358 /* 359 * Buffers that have been queued to the kernel. Uses links. 360 */ 361 STAILQ_HEAD(,camdd_buf) active_queue; 362 363 /* 364 * Will generally contain one of our buffers that is waiting for enough 365 * I/O from our partner thread to be able to execute. This will 366 * generally happen when our per-I/O-size is larger than the 367 * partner thread's per-I/O-size. Uses links. 368 */ 369 STAILQ_HEAD(,camdd_buf) pending_queue; 370 371 /* 372 * Number of buffers on the pending queue 373 */ 374 int num_pending_queue; 375 376 /* 377 * Buffers that are filled and ready to execute. This is used when 378 * our partner (reader) thread sends us blocks that are larger than 379 * our blocksize, and so we have to split them into multiple pieces. 380 */ 381 STAILQ_HEAD(,camdd_buf) run_queue; 382 383 /* 384 * Number of buffers on the run queue. 385 */ 386 int num_run_queue; 387 388 STAILQ_HEAD(,camdd_buf) reorder_queue; 389 390 int num_reorder_queue; 391 392 /* 393 * Buffers that have been queued to us by our partner thread 394 * (generally the reader thread) to be written out. Uses 395 * work_links. 396 */ 397 STAILQ_HEAD(,camdd_buf) work_queue; 398 399 /* 400 * Buffers that have been completed by our partner thread. Uses 401 * work_links. 402 */ 403 STAILQ_HEAD(,camdd_buf) peer_done_queue; 404 405 /* 406 * Number of buffers on the peer done queue. 407 */ 408 uint32_t num_peer_done_queue; 409 410 /* 411 * A list of buffers that we have queued to our peer thread. Uses 412 * links. 413 */ 414 STAILQ_HEAD(,camdd_buf) peer_work_queue; 415 416 /* 417 * Number of buffers on the peer work queue. 418 */ 419 uint32_t num_peer_work_queue; 420 }; 421 422 static sem_t camdd_sem; 423 static sig_atomic_t need_exit = 0; 424 static sig_atomic_t error_exit = 0; 425 static sig_atomic_t need_status = 0; 426 427 #ifndef min 428 #define min(a, b) (a < b) ? a : b 429 #endif 430 431 /* 432 * XXX KDM private copy of timespecsub(). This is normally defined in 433 * sys/time.h, but is only enabled in the kernel. If that definition is 434 * enabled in userland, it breaks the build of libnetbsd. 
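* The macro below subtracts *uvp from *vvp in place, borrowing one second
* (1000000000 nanoseconds) whenever the resulting tv_nsec goes negative.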
435 */ 436 #ifndef timespecsub 437 #define timespecsub(vvp, uvp) \ 438 do { \ 439 (vvp)->tv_sec -= (uvp)->tv_sec; \ 440 (vvp)->tv_nsec -= (uvp)->tv_nsec; \ 441 if ((vvp)->tv_nsec < 0) { \ 442 (vvp)->tv_sec--; \ 443 (vvp)->tv_nsec += 1000000000; \ 444 } \ 445 } while (0) 446 #endif 447 448 449 /* Generically useful offsets into the peripheral private area */ 450 #define ppriv_ptr0 periph_priv.entries[0].ptr 451 #define ppriv_ptr1 periph_priv.entries[1].ptr 452 #define ppriv_field0 periph_priv.entries[0].field 453 #define ppriv_field1 periph_priv.entries[1].field 454 455 #define ccb_buf ppriv_ptr0 456 457 #define CAMDD_FILE_DEFAULT_BLOCK 524288 458 #define CAMDD_FILE_DEFAULT_DEPTH 1 459 #define CAMDD_PASS_MAX_BLOCK 1048576 460 #define CAMDD_PASS_DEFAULT_DEPTH 6 461 #define CAMDD_PASS_RW_TIMEOUT 60 * 1000 462 463 static int parse_btl(char *tstr, int *bus, int *target, int *lun, 464 camdd_argmask *arglst); 465 void camdd_free_dev(struct camdd_dev *dev); 466 struct camdd_dev *camdd_alloc_dev(camdd_dev_type dev_type, 467 struct kevent *new_ke, int num_ke, 468 int retry_count, int timeout); 469 static struct camdd_buf *camdd_alloc_buf(struct camdd_dev *dev, 470 camdd_buf_type buf_type); 471 void camdd_release_buf(struct camdd_buf *buf); 472 struct camdd_buf *camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type); 473 int camdd_buf_sg_create(struct camdd_buf *buf, int iovec, 474 uint32_t sector_size, uint32_t *num_sectors_used, 475 int *double_buf_needed); 476 uint32_t camdd_buf_get_len(struct camdd_buf *buf); 477 void camdd_buf_add_child(struct camdd_buf *buf, struct camdd_buf *child_buf); 478 int camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 479 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran); 480 struct camdd_dev *camdd_probe_file(int fd, struct camdd_io_opts *io_opts, 481 int retry_count, int timeout); 482 struct camdd_dev *camdd_probe_pass(struct cam_device *cam_dev, 483 struct camdd_io_opts *io_opts, 484 camdd_argmask arglist, int probe_retry_count, 485 int probe_timeout, int io_retry_count, 486 int io_timeout); 487 void *camdd_file_worker(void *arg); 488 camdd_buf_status camdd_ccb_status(union ccb *ccb); 489 int camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf); 490 int camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf); 491 void camdd_peer_done(struct camdd_buf *buf); 492 void camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 493 int *error_count); 494 int camdd_pass_fetch(struct camdd_dev *dev); 495 int camdd_file_run(struct camdd_dev *dev); 496 int camdd_pass_run(struct camdd_dev *dev); 497 int camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len); 498 int camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf); 499 void camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 500 uint32_t *peer_depth, uint32_t *our_bytes, 501 uint32_t *peer_bytes); 502 void *camdd_worker(void *arg); 503 void camdd_sig_handler(int sig); 504 void camdd_print_status(struct camdd_dev *camdd_dev, 505 struct camdd_dev *other_dev, 506 struct timespec *start_time); 507 int camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, 508 uint64_t max_io, int retry_count, int timeout); 509 int camdd_parse_io_opts(char *args, int is_write, 510 struct camdd_io_opts *io_opts); 511 void usage(void); 512 513 /* 514 * Parse out a bus, or a bus, target and lun in the following 515 * format: 516 * bus 517 * bus:target 518 * bus:target:lun 519 * 520 * Returns the number of parsed components, or 0. 
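* For example, "0:1:2" is parsed as bus 0, target 1, lun 2; it returns 3 and
* sets CAMDD_ARG_BUS, CAMDD_ARG_TARGET and CAMDD_ARG_LUN in *arglst.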
521 */ 522 static int 523 parse_btl(char *tstr, int *bus, int *target, int *lun, camdd_argmask *arglst) 524 { 525 char *tmpstr; 526 int convs = 0; 527 528 while (isspace(*tstr) && (*tstr != '\0')) 529 tstr++; 530 531 tmpstr = (char *)strtok(tstr, ":"); 532 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 533 *bus = strtol(tmpstr, NULL, 0); 534 *arglst |= CAMDD_ARG_BUS; 535 convs++; 536 tmpstr = (char *)strtok(NULL, ":"); 537 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 538 *target = strtol(tmpstr, NULL, 0); 539 *arglst |= CAMDD_ARG_TARGET; 540 convs++; 541 tmpstr = (char *)strtok(NULL, ":"); 542 if ((tmpstr != NULL) && (*tmpstr != '\0')) { 543 *lun = strtol(tmpstr, NULL, 0); 544 *arglst |= CAMDD_ARG_LUN; 545 convs++; 546 } 547 } 548 } 549 550 return convs; 551 } 552 553 /* 554 * XXX KDM clean up and free all of the buffers on the queue! 555 */ 556 void 557 camdd_free_dev(struct camdd_dev *dev) 558 { 559 if (dev == NULL) 560 return; 561 562 switch (dev->dev_type) { 563 case CAMDD_DEV_FILE: { 564 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 565 566 if (file_dev->fd != -1) 567 close(file_dev->fd); 568 free(file_dev->tmp_buf); 569 break; 570 } 571 case CAMDD_DEV_PASS: { 572 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 573 574 if (pass_dev->dev != NULL) 575 cam_close_device(pass_dev->dev); 576 break; 577 } 578 default: 579 break; 580 } 581 582 free(dev); 583 } 584 585 struct camdd_dev * 586 camdd_alloc_dev(camdd_dev_type dev_type, struct kevent *new_ke, int num_ke, 587 int retry_count, int timeout) 588 { 589 struct camdd_dev *dev = NULL; 590 struct kevent *ke; 591 size_t ke_size; 592 int retval = 0; 593 594 dev = malloc(sizeof(*dev)); 595 if (dev == NULL) { 596 warn("%s: unable to malloc %zu bytes", __func__, sizeof(*dev)); 597 goto bailout; 598 } 599 600 bzero(dev, sizeof(*dev)); 601 602 dev->dev_type = dev_type; 603 dev->io_timeout = timeout; 604 dev->retry_count = retry_count; 605 STAILQ_INIT(&dev->free_queue); 606 STAILQ_INIT(&dev->free_indirect_queue); 607 STAILQ_INIT(&dev->active_queue); 608 STAILQ_INIT(&dev->pending_queue); 609 STAILQ_INIT(&dev->run_queue); 610 STAILQ_INIT(&dev->reorder_queue); 611 STAILQ_INIT(&dev->work_queue); 612 STAILQ_INIT(&dev->peer_done_queue); 613 STAILQ_INIT(&dev->peer_work_queue); 614 retval = pthread_mutex_init(&dev->mutex, NULL); 615 if (retval != 0) { 616 warnc(retval, "%s: failed to initialize mutex", __func__); 617 goto bailout; 618 } 619 620 retval = pthread_cond_init(&dev->cond, NULL); 621 if (retval != 0) { 622 warnc(retval, "%s: failed to initialize condition variable", 623 __func__); 624 goto bailout; 625 } 626 627 dev->kq = kqueue(); 628 if (dev->kq == -1) { 629 warn("%s: Unable to create kqueue", __func__); 630 goto bailout; 631 } 632 633 ke_size = sizeof(struct kevent) * (num_ke + 4); 634 ke = malloc(ke_size); 635 if (ke == NULL) { 636 warn("%s: unable to malloc %zu bytes", __func__, ke_size); 637 goto bailout; 638 } 639 bzero(ke, ke_size); 640 if (num_ke > 0) 641 bcopy(new_ke, ke, num_ke * sizeof(struct kevent)); 642 643 EV_SET(&ke[num_ke++], (uintptr_t)&dev->work_queue, EVFILT_USER, 644 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 645 EV_SET(&ke[num_ke++], (uintptr_t)&dev->peer_done_queue, EVFILT_USER, 646 EV_ADD|EV_ENABLE|EV_CLEAR, 0,0, 0); 647 EV_SET(&ke[num_ke++], SIGINFO, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 648 EV_SET(&ke[num_ke++], SIGINT, EVFILT_SIGNAL, EV_ADD|EV_ENABLE, 0,0,0); 649 650 retval = kevent(dev->kq, ke, num_ke, NULL, 0, NULL); 651 if (retval == -1) { 652 warn("%s: Unable to register kevents", __func__); 653 goto 
bailout; 654 } 655 656 657 return (dev); 658 659 bailout: 660 free(dev); 661 662 return (NULL); 663 } 664 665 static struct camdd_buf * 666 camdd_alloc_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 667 { 668 struct camdd_buf *buf = NULL; 669 uint8_t *data_ptr = NULL; 670 671 /* 672 * We only need to allocate data space for data buffers. 673 */ 674 switch (buf_type) { 675 case CAMDD_BUF_DATA: 676 data_ptr = malloc(dev->blocksize); 677 if (data_ptr == NULL) { 678 warn("unable to allocate %u bytes", dev->blocksize); 679 goto bailout_error; 680 } 681 break; 682 default: 683 break; 684 } 685 686 buf = malloc(sizeof(*buf)); 687 if (buf == NULL) { 688 warn("unable to allocate %zu bytes", sizeof(*buf)); 689 goto bailout_error; 690 } 691 692 bzero(buf, sizeof(*buf)); 693 buf->buf_type = buf_type; 694 buf->dev = dev; 695 switch (buf_type) { 696 case CAMDD_BUF_DATA: { 697 struct camdd_buf_data *data; 698 699 data = &buf->buf_type_spec.data; 700 701 data->alloc_len = dev->blocksize; 702 data->buf = data_ptr; 703 break; 704 } 705 case CAMDD_BUF_INDIRECT: 706 break; 707 default: 708 break; 709 } 710 STAILQ_INIT(&buf->src_list); 711 712 return (buf); 713 714 bailout_error: 715 free(data_ptr); 716 717 return (NULL); 718 } 719 720 void 721 camdd_release_buf(struct camdd_buf *buf) 722 { 723 struct camdd_dev *dev; 724 725 dev = buf->dev; 726 727 switch (buf->buf_type) { 728 case CAMDD_BUF_DATA: { 729 struct camdd_buf_data *data; 730 731 data = &buf->buf_type_spec.data; 732 733 if (data->segs != NULL) { 734 if (data->extra_buf != 0) { 735 void *extra_buf; 736 737 extra_buf = (void *) 738 data->segs[data->sg_count - 1].ds_addr; 739 free(extra_buf); 740 data->extra_buf = 0; 741 } 742 free(data->segs); 743 data->segs = NULL; 744 data->sg_count = 0; 745 } else if (data->iovec != NULL) { 746 if (data->extra_buf != 0) { 747 free(data->iovec[data->sg_count - 1].iov_base); 748 data->extra_buf = 0; 749 } 750 free(data->iovec); 751 data->iovec = NULL; 752 data->sg_count = 0; 753 } 754 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 755 break; 756 } 757 case CAMDD_BUF_INDIRECT: 758 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, buf, links); 759 break; 760 default: 761 err(1, "%s: Invalid buffer type %d for released buffer", 762 __func__, buf->buf_type); 763 break; 764 } 765 } 766 767 struct camdd_buf * 768 camdd_get_buf(struct camdd_dev *dev, camdd_buf_type buf_type) 769 { 770 struct camdd_buf *buf = NULL; 771 772 switch (buf_type) { 773 case CAMDD_BUF_DATA: 774 buf = STAILQ_FIRST(&dev->free_queue); 775 if (buf != NULL) { 776 struct camdd_buf_data *data; 777 uint8_t *data_ptr; 778 uint32_t alloc_len; 779 780 STAILQ_REMOVE_HEAD(&dev->free_queue, links); 781 data = &buf->buf_type_spec.data; 782 data_ptr = data->buf; 783 alloc_len = data->alloc_len; 784 bzero(buf, sizeof(*buf)); 785 data->buf = data_ptr; 786 data->alloc_len = alloc_len; 787 } 788 break; 789 case CAMDD_BUF_INDIRECT: 790 buf = STAILQ_FIRST(&dev->free_indirect_queue); 791 if (buf != NULL) { 792 STAILQ_REMOVE_HEAD(&dev->free_indirect_queue, links); 793 794 bzero(buf, sizeof(*buf)); 795 } 796 break; 797 default: 798 warnx("Unknown buffer type %d requested", buf_type); 799 break; 800 } 801 802 803 if (buf == NULL) 804 return (camdd_alloc_buf(dev, buf_type)); 805 else { 806 STAILQ_INIT(&buf->src_list); 807 buf->dev = dev; 808 buf->buf_type = buf_type; 809 810 return (buf); 811 } 812 } 813 814 int 815 camdd_buf_sg_create(struct camdd_buf *buf, int iovec, uint32_t sector_size, 816 uint32_t *num_sectors_used, int *double_buf_needed) 817 { 818 struct camdd_buf 
*tmp_buf; 819 struct camdd_buf_data *data; 820 uint8_t *extra_buf = NULL; 821 size_t extra_buf_len = 0; 822 int i, retval = 0; 823 824 data = &buf->buf_type_spec.data; 825 826 data->sg_count = buf->src_count; 827 /* 828 * Compose a scatter/gather list from all of the buffers in the list. 829 * If the length of the buffer isn't a multiple of the sector size, 830 * we'll have to add an extra buffer. This should only happen 831 * at the end of a transfer. 832 */ 833 if ((data->fill_len % sector_size) != 0) { 834 extra_buf_len = sector_size - (data->fill_len % sector_size); 835 extra_buf = calloc(extra_buf_len, 1); 836 if (extra_buf == NULL) { 837 warn("%s: unable to allocate %zu bytes for extra " 838 "buffer space", __func__, extra_buf_len); 839 retval = 1; 840 goto bailout; 841 } 842 data->extra_buf = 1; 843 data->sg_count++; 844 } 845 if (iovec == 0) { 846 data->segs = calloc(data->sg_count, sizeof(bus_dma_segment_t)); 847 if (data->segs == NULL) { 848 warn("%s: unable to allocate %zu bytes for S/G list", 849 __func__, sizeof(bus_dma_segment_t) * 850 data->sg_count); 851 retval = 1; 852 goto bailout; 853 } 854 855 } else { 856 data->iovec = calloc(data->sg_count, sizeof(struct iovec)); 857 if (data->iovec == NULL) { 858 warn("%s: unable to allocate %zu bytes for S/G list", 859 __func__, sizeof(struct iovec) * data->sg_count); 860 retval = 1; 861 goto bailout; 862 } 863 } 864 865 for (i = 0, tmp_buf = STAILQ_FIRST(&buf->src_list); 866 i < buf->src_count && tmp_buf != NULL; i++, 867 tmp_buf = STAILQ_NEXT(tmp_buf, src_links)) { 868 869 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 870 struct camdd_buf_data *tmp_data; 871 872 tmp_data = &tmp_buf->buf_type_spec.data; 873 if (iovec == 0) { 874 data->segs[i].ds_addr = 875 (bus_addr_t) tmp_data->buf; 876 data->segs[i].ds_len = tmp_data->fill_len - 877 tmp_data->resid; 878 } else { 879 data->iovec[i].iov_base = tmp_data->buf; 880 data->iovec[i].iov_len = tmp_data->fill_len - 881 tmp_data->resid; 882 } 883 if (((tmp_data->fill_len - tmp_data->resid) % 884 sector_size) != 0) 885 *double_buf_needed = 1; 886 } else { 887 struct camdd_buf_indirect *tmp_ind; 888 889 tmp_ind = &tmp_buf->buf_type_spec.indirect; 890 if (iovec == 0) { 891 data->segs[i].ds_addr = 892 (bus_addr_t)tmp_ind->start_ptr; 893 data->segs[i].ds_len = tmp_ind->len; 894 } else { 895 data->iovec[i].iov_base = tmp_ind->start_ptr; 896 data->iovec[i].iov_len = tmp_ind->len; 897 } 898 if ((tmp_ind->len % sector_size) != 0) 899 *double_buf_needed = 1; 900 } 901 } 902 903 if (extra_buf != NULL) { 904 if (iovec == 0) { 905 data->segs[i].ds_addr = (bus_addr_t)extra_buf; 906 data->segs[i].ds_len = extra_buf_len; 907 } else { 908 data->iovec[i].iov_base = extra_buf; 909 data->iovec[i].iov_len = extra_buf_len; 910 } 911 i++; 912 } 913 if ((tmp_buf != NULL) || (i != data->sg_count)) { 914 warnx("buffer source count does not match " 915 "number of buffers in list!"); 916 retval = 1; 917 goto bailout; 918 } 919 920 bailout: 921 if (retval == 0) { 922 *num_sectors_used = (data->fill_len + extra_buf_len) / 923 sector_size; 924 } 925 return (retval); 926 } 927 928 uint32_t 929 camdd_buf_get_len(struct camdd_buf *buf) 930 { 931 uint32_t len = 0; 932 933 if (buf->buf_type != CAMDD_BUF_DATA) { 934 struct camdd_buf_indirect *indirect; 935 936 indirect = &buf->buf_type_spec.indirect; 937 len = indirect->len; 938 } else { 939 struct camdd_buf_data *data; 940 941 data = &buf->buf_type_spec.data; 942 len = data->fill_len; 943 } 944 945 return (len); 946 } 947 948 void 949 camdd_buf_add_child(struct camdd_buf *buf, 
struct camdd_buf *child_buf) 950 { 951 struct camdd_buf_data *data; 952 953 assert(buf->buf_type == CAMDD_BUF_DATA); 954 955 data = &buf->buf_type_spec.data; 956 957 STAILQ_INSERT_TAIL(&buf->src_list, child_buf, src_links); 958 buf->src_count++; 959 960 data->fill_len += camdd_buf_get_len(child_buf); 961 } 962 963 typedef enum { 964 CAMDD_TS_MAX_BLK, 965 CAMDD_TS_MIN_BLK, 966 CAMDD_TS_BLK_GRAN, 967 CAMDD_TS_EFF_IOSIZE 968 } camdd_status_item_index; 969 970 static struct camdd_status_items { 971 const char *name; 972 struct mt_status_entry *entry; 973 } req_status_items[] = { 974 { "max_blk", NULL }, 975 { "min_blk", NULL }, 976 { "blk_gran", NULL }, 977 { "max_effective_iosize", NULL } 978 }; 979 980 int 981 camdd_probe_tape(int fd, char *filename, uint64_t *max_iosize, 982 uint64_t *max_blk, uint64_t *min_blk, uint64_t *blk_gran) 983 { 984 struct mt_status_data status_data; 985 char *xml_str = NULL; 986 unsigned int i; 987 int retval = 0; 988 989 retval = mt_get_xml_str(fd, MTIOCEXTGET, &xml_str); 990 if (retval != 0) 991 err(1, "Couldn't get XML string from %s", filename); 992 993 retval = mt_get_status(xml_str, &status_data); 994 if (retval != XML_STATUS_OK) { 995 warn("couldn't get status for %s", filename); 996 retval = 1; 997 goto bailout; 998 } else 999 retval = 0; 1000 1001 if (status_data.error != 0) { 1002 warnx("%s", status_data.error_str); 1003 retval = 1; 1004 goto bailout; 1005 } 1006 1007 for (i = 0; i < nitems(req_status_items); i++) { 1008 char *name; 1009 1010 name = __DECONST(char *, req_status_items[i].name); 1011 req_status_items[i].entry = mt_status_entry_find(&status_data, 1012 name); 1013 if (req_status_items[i].entry == NULL) { 1014 errx(1, "Cannot find status entry %s", 1015 req_status_items[i].name); 1016 } 1017 } 1018 1019 *max_iosize = req_status_items[CAMDD_TS_EFF_IOSIZE].entry->value_unsigned; 1020 *max_blk= req_status_items[CAMDD_TS_MAX_BLK].entry->value_unsigned; 1021 *min_blk= req_status_items[CAMDD_TS_MIN_BLK].entry->value_unsigned; 1022 *blk_gran = req_status_items[CAMDD_TS_BLK_GRAN].entry->value_unsigned; 1023 bailout: 1024 1025 free(xml_str); 1026 mt_status_free(&status_data); 1027 1028 return (retval); 1029 } 1030 1031 struct camdd_dev * 1032 camdd_probe_file(int fd, struct camdd_io_opts *io_opts, int retry_count, 1033 int timeout) 1034 { 1035 struct camdd_dev *dev = NULL; 1036 struct camdd_dev_file *file_dev; 1037 uint64_t blocksize = io_opts->blocksize; 1038 1039 dev = camdd_alloc_dev(CAMDD_DEV_FILE, NULL, 0, retry_count, timeout); 1040 if (dev == NULL) 1041 goto bailout; 1042 1043 file_dev = &dev->dev_spec.file; 1044 file_dev->fd = fd; 1045 strlcpy(file_dev->filename, io_opts->dev_name, 1046 sizeof(file_dev->filename)); 1047 strlcpy(dev->device_name, io_opts->dev_name, sizeof(dev->device_name)); 1048 if (blocksize == 0) 1049 dev->blocksize = CAMDD_FILE_DEFAULT_BLOCK; 1050 else 1051 dev->blocksize = blocksize; 1052 1053 if ((io_opts->queue_depth != 0) 1054 && (io_opts->queue_depth != 1)) { 1055 warnx("Queue depth %ju for %s ignored, only 1 outstanding " 1056 "command supported", (uintmax_t)io_opts->queue_depth, 1057 io_opts->dev_name); 1058 } 1059 dev->target_queue_depth = CAMDD_FILE_DEFAULT_DEPTH; 1060 dev->run = camdd_file_run; 1061 dev->fetch = NULL; 1062 1063 /* 1064 * We can effectively access files on byte boundaries. We'll reset 1065 * this for devices like disks that can be accessed on sector 1066 * boundaries. 
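* A sector_size of 1 means I/O may start at any byte offset and be any byte
* length; the tape and disk cases below replace it with the device's real
* granularity.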
1067 */ 1068 dev->sector_size = 1; 1069 1070 if ((fd != STDIN_FILENO) 1071 && (fd != STDOUT_FILENO)) { 1072 int retval; 1073 1074 retval = fstat(fd, &file_dev->sb); 1075 if (retval != 0) { 1076 warn("Cannot stat %s", dev->device_name); 1077 goto bailout_error; 1078 } 1079 if (S_ISREG(file_dev->sb.st_mode)) { 1080 file_dev->file_type = CAMDD_FILE_REG; 1081 } else if (S_ISCHR(file_dev->sb.st_mode)) { 1082 int type; 1083 1084 if (ioctl(fd, FIODTYPE, &type) == -1) 1085 err(1, "FIODTYPE ioctl failed on %s", 1086 dev->device_name); 1087 else { 1088 if (type & D_TAPE) 1089 file_dev->file_type = CAMDD_FILE_TAPE; 1090 else if (type & D_DISK) 1091 file_dev->file_type = CAMDD_FILE_DISK; 1092 else if (type & D_MEM) 1093 file_dev->file_type = CAMDD_FILE_MEM; 1094 else if (type & D_TTY) 1095 file_dev->file_type = CAMDD_FILE_TTY; 1096 } 1097 } else if (S_ISDIR(file_dev->sb.st_mode)) { 1098 errx(1, "cannot operate on directory %s", 1099 dev->device_name); 1100 } else if (S_ISFIFO(file_dev->sb.st_mode)) { 1101 file_dev->file_type = CAMDD_FILE_PIPE; 1102 } else 1103 errx(1, "Cannot determine file type for %s", 1104 dev->device_name); 1105 1106 switch (file_dev->file_type) { 1107 case CAMDD_FILE_REG: 1108 if (file_dev->sb.st_size != 0) 1109 dev->max_sector = file_dev->sb.st_size - 1; 1110 else 1111 dev->max_sector = 0; 1112 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1113 break; 1114 case CAMDD_FILE_TAPE: { 1115 uint64_t max_iosize, max_blk, min_blk, blk_gran; 1116 /* 1117 * Check block limits and maximum effective iosize. 1118 * Make sure the blocksize is within the block 1119 * limits (and a multiple of the minimum blocksize) 1120 * and that the blocksize is <= maximum effective 1121 * iosize. 1122 */ 1123 retval = camdd_probe_tape(fd, dev->device_name, 1124 &max_iosize, &max_blk, &min_blk, &blk_gran); 1125 if (retval != 0) 1126 errx(1, "Unable to probe tape %s", 1127 dev->device_name); 1128 1129 /* 1130 * The blocksize needs to be <= the maximum 1131 * effective I/O size of the tape device. Note 1132 * that this also takes into account the maximum 1133 * blocksize reported by READ BLOCK LIMITS. 1134 */ 1135 if (dev->blocksize > max_iosize) { 1136 warnx("Blocksize %u too big for %s, limiting " 1137 "to %ju", dev->blocksize, dev->device_name, 1138 max_iosize); 1139 dev->blocksize = max_iosize; 1140 } 1141 1142 /* 1143 * The blocksize needs to be at least min_blk; 1144 */ 1145 if (dev->blocksize < min_blk) { 1146 warnx("Blocksize %u too small for %s, " 1147 "increasing to %ju", dev->blocksize, 1148 dev->device_name, min_blk); 1149 dev->blocksize = min_blk; 1150 } 1151 1152 /* 1153 * And the blocksize needs to be a multiple of 1154 * the block granularity. 1155 */ 1156 if ((blk_gran != 0) 1157 && (dev->blocksize % (1 << blk_gran))) { 1158 warnx("Blocksize %u for %s not a multiple of " 1159 "%d, adjusting to %d", dev->blocksize, 1160 dev->device_name, (1 << blk_gran), 1161 dev->blocksize & ~((1 << blk_gran) - 1)); 1162 dev->blocksize &= ~((1 << blk_gran) - 1); 1163 } 1164 1165 if (dev->blocksize == 0) { 1166 errx(1, "Unable to derive valid blocksize for " 1167 "%s", dev->device_name); 1168 } 1169 1170 /* 1171 * For tape drives, set the sector size to the 1172 * blocksize so that we make sure not to write 1173 * less than the blocksize out to the drive. 
1174 */ 1175 dev->sector_size = dev->blocksize; 1176 break; 1177 } 1178 case CAMDD_FILE_DISK: { 1179 off_t media_size; 1180 unsigned int sector_size; 1181 1182 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1183 1184 if (ioctl(fd, DIOCGSECTORSIZE, &sector_size) == -1) { 1185 err(1, "DIOCGSECTORSIZE ioctl failed on %s", 1186 dev->device_name); 1187 } 1188 1189 if (sector_size == 0) { 1190 errx(1, "DIOCGSECTORSIZE ioctl returned " 1191 "invalid sector size %u for %s", 1192 sector_size, dev->device_name); 1193 } 1194 1195 if (ioctl(fd, DIOCGMEDIASIZE, &media_size) == -1) { 1196 err(1, "DIOCGMEDIASIZE ioctl failed on %s", 1197 dev->device_name); 1198 } 1199 1200 if (media_size == 0) { 1201 errx(1, "DIOCGMEDIASIZE ioctl returned " 1202 "invalid media size %ju for %s", 1203 (uintmax_t)media_size, dev->device_name); 1204 } 1205 1206 if (dev->blocksize % sector_size) { 1207 errx(1, "%s blocksize %u not a multiple of " 1208 "sector size %u", dev->device_name, 1209 dev->blocksize, sector_size); 1210 } 1211 1212 dev->sector_size = sector_size; 1213 dev->max_sector = (media_size / sector_size) - 1; 1214 break; 1215 } 1216 case CAMDD_FILE_MEM: 1217 file_dev->file_flags |= CAMDD_FF_CAN_SEEK; 1218 break; 1219 default: 1220 break; 1221 } 1222 } 1223 1224 if ((io_opts->offset != 0) 1225 && ((file_dev->file_flags & CAMDD_FF_CAN_SEEK) == 0)) { 1226 warnx("Offset %ju specified for %s, but we cannot seek on %s", 1227 io_opts->offset, io_opts->dev_name, io_opts->dev_name); 1228 goto bailout_error; 1229 } 1230 #if 0 1231 else if ((io_opts->offset != 0) 1232 && ((io_opts->offset % dev->sector_size) != 0)) { 1233 warnx("Offset %ju for %s is not a multiple of the " 1234 "sector size %u", io_opts->offset, 1235 io_opts->dev_name, dev->sector_size); 1236 goto bailout_error; 1237 } else { 1238 dev->start_offset_bytes = io_opts->offset; 1239 } 1240 #endif 1241 1242 bailout: 1243 return (dev); 1244 1245 bailout_error: 1246 camdd_free_dev(dev); 1247 return (NULL); 1248 } 1249 1250 /* 1251 * Need to implement this. Do a basic probe: 1252 * - Check the inquiry data, make sure we're talking to a device that we 1253 * can reasonably expect to talk to -- direct, RBC, CD, WORM. 1254 * - Send a test unit ready, make sure the device is available. 1255 * - Get the capacity and block size. 1256 */ 1257 struct camdd_dev * 1258 camdd_probe_pass(struct cam_device *cam_dev, struct camdd_io_opts *io_opts, 1259 camdd_argmask arglist, int probe_retry_count, 1260 int probe_timeout, int io_retry_count, int io_timeout) 1261 { 1262 union ccb *ccb; 1263 uint64_t maxsector; 1264 uint32_t cpi_maxio, max_iosize, pass_numblocks; 1265 uint32_t block_len; 1266 struct scsi_read_capacity_data rcap; 1267 struct scsi_read_capacity_data_long rcaplong; 1268 struct camdd_dev *dev; 1269 struct camdd_dev_pass *pass_dev; 1270 struct kevent ke; 1271 int scsi_dev_type; 1272 1273 dev = NULL; 1274 1275 scsi_dev_type = SID_TYPE(&cam_dev->inq_data); 1276 maxsector = 0; 1277 block_len = 0; 1278 1279 /* 1280 * For devices that support READ CAPACITY, we'll attempt to get the 1281 * capacity. Otherwise, we really don't support tape or other 1282 * devices via SCSI passthrough, so just return an error in that case.
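* The device types accepted below (direct access, WORM, CD-ROM, optical,
* RBC and host managed zoned block) all support READ CAPACITY; any other
* type is rejected with an error.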
1283 */ 1284 switch (scsi_dev_type) { 1285 case T_DIRECT: 1286 case T_WORM: 1287 case T_CDROM: 1288 case T_OPTICAL: 1289 case T_RBC: 1290 case T_ZBC_HM: 1291 break; 1292 default: 1293 errx(1, "Unsupported SCSI device type %d", scsi_dev_type); 1294 break; /*NOTREACHED*/ 1295 } 1296 1297 ccb = cam_getccb(cam_dev); 1298 1299 if (ccb == NULL) { 1300 warnx("%s: error allocating ccb", __func__); 1301 goto bailout; 1302 } 1303 1304 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); 1305 1306 scsi_read_capacity(&ccb->csio, 1307 /*retries*/ probe_retry_count, 1308 /*cbfcnp*/ NULL, 1309 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1310 &rcap, 1311 SSD_FULL_SIZE, 1312 /*timeout*/ probe_timeout ? probe_timeout : 5000); 1313 1314 /* Disable freezing the device queue */ 1315 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1316 1317 if (arglist & CAMDD_ARG_ERR_RECOVER) 1318 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1319 1320 if (cam_send_ccb(cam_dev, ccb) < 0) { 1321 warn("error sending READ CAPACITY command"); 1322 1323 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1324 CAM_EPF_ALL, stderr); 1325 1326 goto bailout; 1327 } 1328 1329 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1330 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1331 goto bailout; 1332 } 1333 1334 maxsector = scsi_4btoul(rcap.addr); 1335 block_len = scsi_4btoul(rcap.length); 1336 1337 /* 1338 * A last block of 2^32-1 means that the true capacity is over 2TB, 1339 * and we need to issue the long READ CAPACITY to get the real 1340 * capacity. Otherwise, we're all set. 1341 */ 1342 if (maxsector != 0xffffffff) 1343 goto rcap_done; 1344 1345 scsi_read_capacity_16(&ccb->csio, 1346 /*retries*/ probe_retry_count, 1347 /*cbfcnp*/ NULL, 1348 /*tag_action*/ MSG_SIMPLE_Q_TAG, 1349 /*lba*/ 0, 1350 /*reladdr*/ 0, 1351 /*pmi*/ 0, 1352 (uint8_t *)&rcaplong, 1353 sizeof(rcaplong), 1354 /*sense_len*/ SSD_FULL_SIZE, 1355 /*timeout*/ probe_timeout ? 
probe_timeout : 5000); 1356 1357 /* Disable freezing the device queue */ 1358 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 1359 1360 if (arglist & CAMDD_ARG_ERR_RECOVER) 1361 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 1362 1363 if (cam_send_ccb(cam_dev, ccb) < 0) { 1364 warn("error sending READ CAPACITY (16) command"); 1365 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1366 CAM_EPF_ALL, stderr); 1367 goto bailout; 1368 } 1369 1370 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1371 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, CAM_EPF_ALL, stderr); 1372 goto bailout; 1373 } 1374 1375 maxsector = scsi_8btou64(rcaplong.addr); 1376 block_len = scsi_4btoul(rcaplong.length); 1377 1378 rcap_done: 1379 if (block_len == 0) { 1380 warnx("Sector size for %s%u is 0, cannot continue", 1381 cam_dev->device_name, cam_dev->dev_unit_num); 1382 goto bailout_error; 1383 } 1384 1385 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->cpi); 1386 1387 ccb->ccb_h.func_code = XPT_PATH_INQ; 1388 ccb->ccb_h.flags = CAM_DIR_NONE; 1389 ccb->ccb_h.retry_count = 1; 1390 1391 if (cam_send_ccb(cam_dev, ccb) < 0) { 1392 warn("error sending XPT_PATH_INQ CCB"); 1393 1394 cam_error_print(cam_dev, ccb, CAM_ESF_ALL, 1395 CAM_EPF_ALL, stderr); 1396 goto bailout; 1397 } 1398 1399 EV_SET(&ke, cam_dev->fd, EVFILT_READ, EV_ADD|EV_ENABLE, 0, 0, 0); 1400 1401 dev = camdd_alloc_dev(CAMDD_DEV_PASS, &ke, 1, io_retry_count, 1402 io_timeout); 1403 if (dev == NULL) 1404 goto bailout; 1405 1406 pass_dev = &dev->dev_spec.pass; 1407 pass_dev->scsi_dev_type = scsi_dev_type; 1408 pass_dev->dev = cam_dev; 1409 pass_dev->max_sector = maxsector; 1410 pass_dev->block_len = block_len; 1411 pass_dev->cpi_maxio = ccb->cpi.maxio; 1412 snprintf(dev->device_name, sizeof(dev->device_name), "%s%u", 1413 pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); 1414 dev->sector_size = block_len; 1415 dev->max_sector = maxsector; 1416 1417 1418 /* 1419 * Determine the optimal blocksize to use for this device. 1420 */ 1421 1422 /* 1423 * If the controller has not specified a maximum I/O size, 1424 * just go with 128K as a somewhat conservative value. 1425 */ 1426 if (pass_dev->cpi_maxio == 0) 1427 cpi_maxio = 131072; 1428 else 1429 cpi_maxio = pass_dev->cpi_maxio; 1430 1431 /* 1432 * If the controller has a large maximum I/O size, limit it 1433 * to something smaller so that the kernel doesn't have trouble 1434 * allocating buffers to copy data in and out for us. 1435 * XXX KDM this is until we have unmapped I/O support in the kernel. 1436 */ 1437 max_iosize = min(cpi_maxio, CAMDD_PASS_MAX_BLOCK); 1438 1439 /* 1440 * If we weren't able to get a block size for some reason, 1441 * default to 512 bytes. 1442 */ 1443 block_len = pass_dev->block_len; 1444 if (block_len == 0) 1445 block_len = 512; 1446 1447 /* 1448 * Figure out how many blocksize chunks will fit in the 1449 * maximum I/O size. 
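* For example, with 512-byte LBAs and the (capped) 1 MB maximum I/O size,
* that works out to 2048 blocks and a 1 MB default blocksize.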
1450 */ 1451 pass_numblocks = max_iosize / block_len; 1452 1453 /* 1454 * And finally, multiple the number of blocks by the LBA 1455 * length to get our maximum block size; 1456 */ 1457 dev->blocksize = pass_numblocks * block_len; 1458 1459 if (io_opts->blocksize != 0) { 1460 if ((io_opts->blocksize % dev->sector_size) != 0) { 1461 warnx("Blocksize %ju for %s is not a multiple of " 1462 "sector size %u", (uintmax_t)io_opts->blocksize, 1463 dev->device_name, dev->sector_size); 1464 goto bailout_error; 1465 } 1466 dev->blocksize = io_opts->blocksize; 1467 } 1468 dev->target_queue_depth = CAMDD_PASS_DEFAULT_DEPTH; 1469 if (io_opts->queue_depth != 0) 1470 dev->target_queue_depth = io_opts->queue_depth; 1471 1472 if (io_opts->offset != 0) { 1473 if (io_opts->offset > (dev->max_sector * dev->sector_size)) { 1474 warnx("Offset %ju is past the end of device %s", 1475 io_opts->offset, dev->device_name); 1476 goto bailout_error; 1477 } 1478 #if 0 1479 else if ((io_opts->offset % dev->sector_size) != 0) { 1480 warnx("Offset %ju for %s is not a multiple of the " 1481 "sector size %u", io_opts->offset, 1482 dev->device_name, dev->sector_size); 1483 goto bailout_error; 1484 } 1485 dev->start_offset_bytes = io_opts->offset; 1486 #endif 1487 } 1488 1489 dev->min_cmd_size = io_opts->min_cmd_size; 1490 1491 dev->run = camdd_pass_run; 1492 dev->fetch = camdd_pass_fetch; 1493 1494 bailout: 1495 cam_freeccb(ccb); 1496 1497 return (dev); 1498 1499 bailout_error: 1500 cam_freeccb(ccb); 1501 1502 camdd_free_dev(dev); 1503 1504 return (NULL); 1505 } 1506 1507 void * 1508 camdd_worker(void *arg) 1509 { 1510 struct camdd_dev *dev = arg; 1511 struct camdd_buf *buf; 1512 struct timespec ts, *kq_ts; 1513 1514 ts.tv_sec = 0; 1515 ts.tv_nsec = 0; 1516 1517 pthread_mutex_lock(&dev->mutex); 1518 1519 dev->flags |= CAMDD_DEV_FLAG_ACTIVE; 1520 1521 for (;;) { 1522 struct kevent ke; 1523 int retval = 0; 1524 1525 /* 1526 * XXX KDM check the reorder queue depth? 1527 */ 1528 if (dev->write_dev == 0) { 1529 uint32_t our_depth, peer_depth, peer_bytes, our_bytes; 1530 uint32_t target_depth = dev->target_queue_depth; 1531 uint32_t peer_target_depth = 1532 dev->peer_dev->target_queue_depth; 1533 uint32_t peer_blocksize = dev->peer_dev->blocksize; 1534 1535 camdd_get_depth(dev, &our_depth, &peer_depth, 1536 &our_bytes, &peer_bytes); 1537 1538 #if 0 1539 while (((our_depth < target_depth) 1540 && (peer_depth < peer_target_depth)) 1541 || ((peer_bytes + our_bytes) < 1542 (peer_blocksize * 2))) { 1543 #endif 1544 while (((our_depth + peer_depth) < 1545 (target_depth + peer_target_depth)) 1546 || ((peer_bytes + our_bytes) < 1547 (peer_blocksize * 3))) { 1548 1549 retval = camdd_queue(dev, NULL); 1550 if (retval == 1) 1551 break; 1552 else if (retval != 0) { 1553 error_exit = 1; 1554 goto bailout; 1555 } 1556 1557 camdd_get_depth(dev, &our_depth, &peer_depth, 1558 &our_bytes, &peer_bytes); 1559 } 1560 } 1561 /* 1562 * See if we have any I/O that is ready to execute. 1563 */ 1564 buf = STAILQ_FIRST(&dev->run_queue); 1565 if (buf != NULL) { 1566 while (dev->target_queue_depth > dev->cur_active_io) { 1567 retval = dev->run(dev); 1568 if (retval == -1) { 1569 dev->flags |= CAMDD_DEV_FLAG_EOF; 1570 error_exit = 1; 1571 break; 1572 } else if (retval != 0) { 1573 break; 1574 } 1575 } 1576 } 1577 1578 /* 1579 * We've reached EOF, or our partner has reached EOF. 
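* A writer can exit once its work, run and active queues have drained; a
* reader must also wait for the writer to hand back all of its buffers,
* unless it was the writer that hit EOF first.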
1580 */ 1581 if ((dev->flags & CAMDD_DEV_FLAG_EOF) 1582 || (dev->flags & CAMDD_DEV_FLAG_PEER_EOF)) { 1583 if (dev->write_dev != 0) { 1584 if ((STAILQ_EMPTY(&dev->work_queue)) 1585 && (dev->num_run_queue == 0) 1586 && (dev->cur_active_io == 0)) { 1587 goto bailout; 1588 } 1589 } else { 1590 /* 1591 * If we're the reader, and the writer 1592 * got EOF, he is already done. If we got 1593 * the EOF, then we need to wait until 1594 * everything is flushed out for the writer. 1595 */ 1596 if (dev->flags & CAMDD_DEV_FLAG_PEER_EOF) { 1597 goto bailout; 1598 } else if ((dev->num_peer_work_queue == 0) 1599 && (dev->num_peer_done_queue == 0) 1600 && (dev->cur_active_io == 0) 1601 && (dev->num_run_queue == 0)) { 1602 goto bailout; 1603 } 1604 } 1605 /* 1606 * XXX KDM need to do something about the pending 1607 * queue and cleanup resources. 1608 */ 1609 } 1610 1611 if ((dev->write_dev == 0) 1612 && (dev->cur_active_io == 0) 1613 && (dev->peer_bytes_queued < dev->peer_dev->blocksize)) 1614 kq_ts = &ts; 1615 else 1616 kq_ts = NULL; 1617 1618 /* 1619 * Run kevent to see if there are events to process. 1620 */ 1621 pthread_mutex_unlock(&dev->mutex); 1622 retval = kevent(dev->kq, NULL, 0, &ke, 1, kq_ts); 1623 pthread_mutex_lock(&dev->mutex); 1624 if (retval == -1) { 1625 warn("%s: error returned from kevent",__func__); 1626 goto bailout; 1627 } else if (retval != 0) { 1628 switch (ke.filter) { 1629 case EVFILT_READ: 1630 if (dev->fetch != NULL) { 1631 retval = dev->fetch(dev); 1632 if (retval == -1) { 1633 error_exit = 1; 1634 goto bailout; 1635 } 1636 } 1637 break; 1638 case EVFILT_SIGNAL: 1639 /* 1640 * We register for this so we don't get 1641 * an error as a result of a SIGINFO or a 1642 * SIGINT. It will actually get handled 1643 * by the signal handler. If we get a 1644 * SIGINT, bail out without printing an 1645 * error message. Any other signals 1646 * will result in the error message above. 1647 */ 1648 if (ke.ident == SIGINT) 1649 goto bailout; 1650 break; 1651 case EVFILT_USER: 1652 retval = 0; 1653 /* 1654 * Check to see if the other thread has 1655 * queued any I/O for us to do. (In this 1656 * case we're the writer.) 1657 */ 1658 for (buf = STAILQ_FIRST(&dev->work_queue); 1659 buf != NULL; 1660 buf = STAILQ_FIRST(&dev->work_queue)) { 1661 STAILQ_REMOVE_HEAD(&dev->work_queue, 1662 work_links); 1663 retval = camdd_queue(dev, buf); 1664 /* 1665 * We keep going unless we get an 1666 * actual error. If we get EOF, we 1667 * still want to remove the buffers 1668 * from the queue and send the back 1669 * to the reader thread. 1670 */ 1671 if (retval == -1) { 1672 error_exit = 1; 1673 goto bailout; 1674 } else 1675 retval = 0; 1676 } 1677 1678 /* 1679 * Next check to see if the other thread has 1680 * queued any completed buffers back to us. 1681 * (In this case we're the reader.) 1682 */ 1683 for (buf = STAILQ_FIRST(&dev->peer_done_queue); 1684 buf != NULL; 1685 buf = STAILQ_FIRST(&dev->peer_done_queue)){ 1686 STAILQ_REMOVE_HEAD( 1687 &dev->peer_done_queue, work_links); 1688 dev->num_peer_done_queue--; 1689 camdd_peer_done(buf); 1690 } 1691 break; 1692 default: 1693 warnx("%s: unknown kevent filter %d", 1694 __func__, ke.filter); 1695 break; 1696 } 1697 } 1698 } 1699 1700 bailout: 1701 1702 dev->flags &= ~CAMDD_DEV_FLAG_ACTIVE; 1703 1704 /* XXX KDM cleanup resources here? */ 1705 1706 pthread_mutex_unlock(&dev->mutex); 1707 1708 need_exit = 1; 1709 sem_post(&camdd_sem); 1710 1711 return (NULL); 1712 } 1713 1714 /* 1715 * Simplistic translation of CCB status to our local status. 
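* CAM_REQ_CMP with no residual maps to OK, a partial residual to SHORT_IO
* and a full residual to EOF; benign SCSI status values map to OK and
* everything else maps to ERROR.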
1716 */ 1717 camdd_buf_status 1718 camdd_ccb_status(union ccb *ccb) 1719 { 1720 camdd_buf_status status = CAMDD_STATUS_NONE; 1721 cam_status ccb_status; 1722 1723 ccb_status = ccb->ccb_h.status & CAM_STATUS_MASK; 1724 1725 switch (ccb_status) { 1726 case CAM_REQ_CMP: { 1727 if (ccb->csio.resid == 0) { 1728 status = CAMDD_STATUS_OK; 1729 } else if (ccb->csio.dxfer_len > ccb->csio.resid) { 1730 status = CAMDD_STATUS_SHORT_IO; 1731 } else { 1732 status = CAMDD_STATUS_EOF; 1733 } 1734 break; 1735 } 1736 case CAM_SCSI_STATUS_ERROR: { 1737 switch (ccb->csio.scsi_status) { 1738 case SCSI_STATUS_OK: 1739 case SCSI_STATUS_COND_MET: 1740 case SCSI_STATUS_INTERMED: 1741 case SCSI_STATUS_INTERMED_COND_MET: 1742 status = CAMDD_STATUS_OK; 1743 break; 1744 case SCSI_STATUS_CMD_TERMINATED: 1745 case SCSI_STATUS_CHECK_COND: 1746 case SCSI_STATUS_QUEUE_FULL: 1747 case SCSI_STATUS_BUSY: 1748 case SCSI_STATUS_RESERV_CONFLICT: 1749 default: 1750 status = CAMDD_STATUS_ERROR; 1751 break; 1752 } 1753 break; 1754 } 1755 default: 1756 status = CAMDD_STATUS_ERROR; 1757 break; 1758 } 1759 1760 return (status); 1761 } 1762 1763 /* 1764 * Queue a buffer to our peer's work thread for writing. 1765 * 1766 * Returns 0 for success, -1 for failure, 1 if the other thread exited. 1767 */ 1768 int 1769 camdd_queue_peer_buf(struct camdd_dev *dev, struct camdd_buf *buf) 1770 { 1771 struct kevent ke; 1772 STAILQ_HEAD(, camdd_buf) local_queue; 1773 struct camdd_buf *buf1, *buf2; 1774 struct camdd_buf_data *data = NULL; 1775 uint64_t peer_bytes_queued = 0; 1776 int active = 1; 1777 int retval = 0; 1778 1779 STAILQ_INIT(&local_queue); 1780 1781 /* 1782 * Since we're the reader, we need to queue our I/O to the writer 1783 * in sequential order in order to make sure it gets written out 1784 * in sequential order. 1785 * 1786 * Check the next expected I/O starting offset. If this doesn't 1787 * match, put it on the reorder queue. 1788 */ 1789 if ((buf->lba * dev->sector_size) != dev->next_completion_pos_bytes) { 1790 1791 /* 1792 * If there is nothing on the queue, there is no sorting 1793 * needed. 1794 */ 1795 if (STAILQ_EMPTY(&dev->reorder_queue)) { 1796 STAILQ_INSERT_TAIL(&dev->reorder_queue, buf, links); 1797 dev->num_reorder_queue++; 1798 goto bailout; 1799 } 1800 1801 /* 1802 * Sort in ascending order by starting LBA. There should 1803 * be no identical LBAs. 1804 */ 1805 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1806 buf1 = buf2) { 1807 buf2 = STAILQ_NEXT(buf1, links); 1808 if (buf->lba < buf1->lba) { 1809 /* 1810 * If we're less than the first one, then 1811 * we insert at the head of the list 1812 * because this has to be the first element 1813 * on the list. 1814 */ 1815 STAILQ_INSERT_HEAD(&dev->reorder_queue, 1816 buf, links); 1817 dev->num_reorder_queue++; 1818 break; 1819 } else if (buf->lba > buf1->lba) { 1820 if (buf2 == NULL) { 1821 STAILQ_INSERT_TAIL(&dev->reorder_queue, 1822 buf, links); 1823 dev->num_reorder_queue++; 1824 break; 1825 } else if (buf->lba < buf2->lba) { 1826 STAILQ_INSERT_AFTER(&dev->reorder_queue, 1827 buf1, buf, links); 1828 dev->num_reorder_queue++; 1829 break; 1830 } 1831 } else { 1832 errx(1, "Found buffers with duplicate LBA %ju!", 1833 buf->lba); 1834 } 1835 } 1836 goto bailout; 1837 } else { 1838 1839 /* 1840 * We're the next expected I/O completion, so put ourselves 1841 * on the local queue to be sent to the writer. We use 1842 * work_links here so that we can queue this to the 1843 * peer_work_queue before taking the buffer off of the 1844 * local_queue. 
1845 */ 1846 dev->next_completion_pos_bytes += buf->len; 1847 STAILQ_INSERT_TAIL(&local_queue, buf, work_links); 1848 1849 /* 1850 * Go through the reorder queue looking for more sequential 1851 * I/O and add it to the local queue. 1852 */ 1853 for (buf1 = STAILQ_FIRST(&dev->reorder_queue); buf1 != NULL; 1854 buf1 = STAILQ_FIRST(&dev->reorder_queue)) { 1855 /* 1856 * As soon as we see an I/O that is out of sequence, 1857 * we're done. 1858 */ 1859 if ((buf1->lba * dev->sector_size) != 1860 dev->next_completion_pos_bytes) 1861 break; 1862 1863 STAILQ_REMOVE_HEAD(&dev->reorder_queue, links); 1864 dev->num_reorder_queue--; 1865 STAILQ_INSERT_TAIL(&local_queue, buf1, work_links); 1866 dev->next_completion_pos_bytes += buf1->len; 1867 } 1868 } 1869 1870 /* 1871 * Setup the event to let the other thread know that it has work 1872 * pending. 1873 */ 1874 EV_SET(&ke, (uintptr_t)&dev->peer_dev->work_queue, EVFILT_USER, 0, 1875 NOTE_TRIGGER, 0, NULL); 1876 1877 /* 1878 * Put this on our shadow queue so that we know what we've queued 1879 * to the other thread. 1880 */ 1881 STAILQ_FOREACH_SAFE(buf1, &local_queue, work_links, buf2) { 1882 if (buf1->buf_type != CAMDD_BUF_DATA) { 1883 errx(1, "%s: should have a data buffer, not an " 1884 "indirect buffer", __func__); 1885 } 1886 data = &buf1->buf_type_spec.data; 1887 1888 /* 1889 * We only need to send one EOF to the writer, and don't 1890 * need to continue sending EOFs after that. 1891 */ 1892 if (buf1->status == CAMDD_STATUS_EOF) { 1893 if (dev->flags & CAMDD_DEV_FLAG_EOF_SENT) { 1894 STAILQ_REMOVE(&local_queue, buf1, camdd_buf, 1895 work_links); 1896 camdd_release_buf(buf1); 1897 retval = 1; 1898 continue; 1899 } 1900 dev->flags |= CAMDD_DEV_FLAG_EOF_SENT; 1901 } 1902 1903 1904 STAILQ_INSERT_TAIL(&dev->peer_work_queue, buf1, links); 1905 peer_bytes_queued += (data->fill_len - data->resid); 1906 dev->peer_bytes_queued += (data->fill_len - data->resid); 1907 dev->num_peer_work_queue++; 1908 } 1909 1910 if (STAILQ_FIRST(&local_queue) == NULL) 1911 goto bailout; 1912 1913 /* 1914 * Drop our mutex and pick up the other thread's mutex. We need to 1915 * do this to avoid deadlocks. 1916 */ 1917 pthread_mutex_unlock(&dev->mutex); 1918 pthread_mutex_lock(&dev->peer_dev->mutex); 1919 1920 if (dev->peer_dev->flags & CAMDD_DEV_FLAG_ACTIVE) { 1921 /* 1922 * Put the buffers on the other thread's incoming work queue. 1923 */ 1924 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 1925 buf1 = STAILQ_FIRST(&local_queue)) { 1926 STAILQ_REMOVE_HEAD(&local_queue, work_links); 1927 STAILQ_INSERT_TAIL(&dev->peer_dev->work_queue, buf1, 1928 work_links); 1929 } 1930 /* 1931 * Send an event to the other thread's kqueue to let it know 1932 * that there is something on the work queue. 1933 */ 1934 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 1935 if (retval == -1) 1936 warn("%s: unable to add peer work_queue kevent", 1937 __func__); 1938 else 1939 retval = 0; 1940 } else 1941 active = 0; 1942 1943 pthread_mutex_unlock(&dev->peer_dev->mutex); 1944 pthread_mutex_lock(&dev->mutex); 1945 1946 /* 1947 * If the other side isn't active, run through the queue and 1948 * release all of the buffers. 
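* The buffers also come off of our shadow peer work queue, and retval is
* set to 1 so the caller knows the other thread has exited.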
1949 */ 1950 if (active == 0) { 1951 for (buf1 = STAILQ_FIRST(&local_queue); buf1 != NULL; 1952 buf1 = STAILQ_FIRST(&local_queue)) { 1953 STAILQ_REMOVE_HEAD(&local_queue, work_links); 1954 STAILQ_REMOVE(&dev->peer_work_queue, buf1, camdd_buf, 1955 links); 1956 dev->num_peer_work_queue--; 1957 camdd_release_buf(buf1); 1958 } 1959 dev->peer_bytes_queued -= peer_bytes_queued; 1960 retval = 1; 1961 } 1962 1963 bailout: 1964 return (retval); 1965 } 1966 1967 /* 1968 * Return a buffer to the reader thread when we have completed writing it. 1969 */ 1970 int 1971 camdd_complete_peer_buf(struct camdd_dev *dev, struct camdd_buf *peer_buf) 1972 { 1973 struct kevent ke; 1974 int retval = 0; 1975 1976 /* 1977 * Setup the event to let the other thread know that we have 1978 * completed a buffer. 1979 */ 1980 EV_SET(&ke, (uintptr_t)&dev->peer_dev->peer_done_queue, EVFILT_USER, 0, 1981 NOTE_TRIGGER, 0, NULL); 1982 1983 /* 1984 * Drop our lock and acquire the other thread's lock before 1985 * manipulating 1986 */ 1987 pthread_mutex_unlock(&dev->mutex); 1988 pthread_mutex_lock(&dev->peer_dev->mutex); 1989 1990 /* 1991 * Put the buffer on the reader thread's peer done queue now that 1992 * we have completed it. 1993 */ 1994 STAILQ_INSERT_TAIL(&dev->peer_dev->peer_done_queue, peer_buf, 1995 work_links); 1996 dev->peer_dev->num_peer_done_queue++; 1997 1998 /* 1999 * Send an event to the peer thread to let it know that we've added 2000 * something to its peer done queue. 2001 */ 2002 retval = kevent(dev->peer_dev->kq, &ke, 1, NULL, 0, NULL); 2003 if (retval == -1) 2004 warn("%s: unable to add peer_done_queue kevent", __func__); 2005 else 2006 retval = 0; 2007 2008 /* 2009 * Drop the other thread's lock and reacquire ours. 2010 */ 2011 pthread_mutex_unlock(&dev->peer_dev->mutex); 2012 pthread_mutex_lock(&dev->mutex); 2013 2014 return (retval); 2015 } 2016 2017 /* 2018 * Free a buffer that was written out by the writer thread and returned to 2019 * the reader thread. 2020 */ 2021 void 2022 camdd_peer_done(struct camdd_buf *buf) 2023 { 2024 struct camdd_dev *dev; 2025 struct camdd_buf_data *data; 2026 2027 dev = buf->dev; 2028 if (buf->buf_type != CAMDD_BUF_DATA) { 2029 errx(1, "%s: should have a data buffer, not an " 2030 "indirect buffer", __func__); 2031 } 2032 2033 data = &buf->buf_type_spec.data; 2034 2035 STAILQ_REMOVE(&dev->peer_work_queue, buf, camdd_buf, links); 2036 dev->num_peer_work_queue--; 2037 dev->peer_bytes_queued -= (data->fill_len - data->resid); 2038 2039 if (buf->status == CAMDD_STATUS_EOF) 2040 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2041 2042 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2043 } 2044 2045 /* 2046 * Assumes caller holds the lock for this device. 2047 */ 2048 void 2049 camdd_complete_buf(struct camdd_dev *dev, struct camdd_buf *buf, 2050 int *error_count) 2051 { 2052 int retval = 0; 2053 2054 /* 2055 * If we're the reader, we need to send the completed I/O 2056 * to the writer. If we're the writer, we need to just 2057 * free up resources, or let the reader know if we've 2058 * encountered an error. 
2059 */ 2060 if (dev->write_dev == 0) { 2061 retval = camdd_queue_peer_buf(dev, buf); 2062 if (retval != 0) 2063 (*error_count)++; 2064 } else { 2065 struct camdd_buf *tmp_buf, *next_buf; 2066 2067 STAILQ_FOREACH_SAFE(tmp_buf, &buf->src_list, src_links, 2068 next_buf) { 2069 struct camdd_buf *src_buf; 2070 struct camdd_buf_indirect *indirect; 2071 2072 STAILQ_REMOVE(&buf->src_list, tmp_buf, 2073 camdd_buf, src_links); 2074 2075 tmp_buf->status = buf->status; 2076 2077 if (tmp_buf->buf_type == CAMDD_BUF_DATA) { 2078 camdd_complete_peer_buf(dev, tmp_buf); 2079 continue; 2080 } 2081 2082 indirect = &tmp_buf->buf_type_spec.indirect; 2083 src_buf = indirect->src_buf; 2084 src_buf->refcount--; 2085 /* 2086 * XXX KDM we probably need to account for 2087 * exactly how many bytes we were able to 2088 * write. Allocate the residual to the 2089 * first N buffers? Or just track the 2090 * number of bytes written? Right now the reader 2091 * doesn't do anything with a residual. 2092 */ 2093 src_buf->status = buf->status; 2094 if (src_buf->refcount <= 0) 2095 camdd_complete_peer_buf(dev, src_buf); 2096 STAILQ_INSERT_TAIL(&dev->free_indirect_queue, 2097 tmp_buf, links); 2098 } 2099 2100 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2101 } 2102 } 2103 2104 /* 2105 * Fetch all completed commands from the pass(4) device. 2106 * 2107 * Returns the number of commands received, or -1 if any of the commands 2108 * completed with an error. Returns 0 if no commands are available. 2109 */ 2110 int 2111 camdd_pass_fetch(struct camdd_dev *dev) 2112 { 2113 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 2114 union ccb ccb; 2115 int retval = 0, num_fetched = 0, error_count = 0; 2116 2117 pthread_mutex_unlock(&dev->mutex); 2118 /* 2119 * XXX KDM we don't distinguish between EFAULT and ENOENT. 2120 */ 2121 while ((retval = ioctl(pass_dev->dev->fd, CAMIOGET, &ccb)) != -1) { 2122 struct camdd_buf *buf; 2123 struct camdd_buf_data *data; 2124 cam_status ccb_status; 2125 union ccb *buf_ccb; 2126 2127 buf = ccb.ccb_h.ccb_buf; 2128 data = &buf->buf_type_spec.data; 2129 buf_ccb = &data->ccb; 2130 2131 num_fetched++; 2132 2133 /* 2134 * Copy the CCB back out so we get status, sense data, etc. 2135 */ 2136 bcopy(&ccb, buf_ccb, sizeof(ccb)); 2137 2138 pthread_mutex_lock(&dev->mutex); 2139 2140 /* 2141 * We're now done, so take this off the active queue. 2142 */ 2143 STAILQ_REMOVE(&dev->active_queue, buf, camdd_buf, links); 2144 dev->cur_active_io--; 2145 2146 ccb_status = ccb.ccb_h.status & CAM_STATUS_MASK; 2147 if (ccb_status != CAM_REQ_CMP) { 2148 cam_error_print(pass_dev->dev, &ccb, CAM_ESF_ALL, 2149 CAM_EPF_ALL, stderr); 2150 } 2151 2152 data->resid = ccb.csio.resid; 2153 dev->bytes_transferred += (ccb.csio.dxfer_len - ccb.csio.resid); 2154 2155 if (buf->status == CAMDD_STATUS_NONE) 2156 buf->status = camdd_ccb_status(&ccb); 2157 if (buf->status == CAMDD_STATUS_ERROR) 2158 error_count++; 2159 else if (buf->status == CAMDD_STATUS_EOF) { 2160 /* 2161 * Once we queue this buffer to our partner thread, 2162 * he will know that we've hit EOF. 2163 */ 2164 dev->flags |= CAMDD_DEV_FLAG_EOF; 2165 } 2166 2167 camdd_complete_buf(dev, buf, &error_count); 2168 2169 /* 2170 * Unlock in preparation for the ioctl call. 2171 */ 2172 pthread_mutex_unlock(&dev->mutex); 2173 } 2174 2175 pthread_mutex_lock(&dev->mutex); 2176 2177 if (error_count > 0) 2178 return (-1); 2179 else 2180 return (num_fetched); 2181 } 2182 2183 /* 2184 * Returns -1 for error, 0 for success/continue, and 1 for resource 2185 * shortage/stop processing. 
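* A return of 1 means no progress could be made on this pass (typically the
* run queue was empty); the worker loop simply tries again later.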
2186 */ 2187 int 2188 camdd_file_run(struct camdd_dev *dev) 2189 { 2190 struct camdd_dev_file *file_dev = &dev->dev_spec.file; 2191 struct camdd_buf_data *data; 2192 struct camdd_buf *buf; 2193 off_t io_offset; 2194 int retval = 0, write_dev = dev->write_dev; 2195 int error_count = 0, no_resources = 0, double_buf_needed = 0; 2196 uint32_t num_sectors = 0, db_len = 0; 2197 2198 buf = STAILQ_FIRST(&dev->run_queue); 2199 if (buf == NULL) { 2200 no_resources = 1; 2201 goto bailout; 2202 } else if ((dev->write_dev == 0) 2203 && (dev->flags & (CAMDD_DEV_FLAG_EOF | 2204 CAMDD_DEV_FLAG_EOF_SENT))) { 2205 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2206 dev->num_run_queue--; 2207 buf->status = CAMDD_STATUS_EOF; 2208 error_count++; 2209 goto bailout; 2210 } 2211 2212 /* 2213 * If we're writing, we need to go through the source buffer list 2214 * and create an S/G list. 2215 */ 2216 if (write_dev != 0) { 2217 retval = camdd_buf_sg_create(buf, /*iovec*/ 1, 2218 dev->sector_size, &num_sectors, &double_buf_needed); 2219 if (retval != 0) { 2220 no_resources = 1; 2221 goto bailout; 2222 } 2223 } 2224 2225 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2226 dev->num_run_queue--; 2227 2228 data = &buf->buf_type_spec.data; 2229 2230 /* 2231 * pread(2) and pwrite(2) offsets are byte offsets. 2232 */ 2233 io_offset = buf->lba * dev->sector_size; 2234 2235 /* 2236 * Unlock the mutex while we read or write. 2237 */ 2238 pthread_mutex_unlock(&dev->mutex); 2239 2240 /* 2241 * Note that we don't need to double buffer if we're the reader 2242 * because in that case, we have allocated a single buffer of 2243 * sufficient size to do the read. This copy is necessary on 2244 * writes because if one of the components of the S/G list is not 2245 * a sector size multiple, the kernel will reject the write. This 2246 * is unfortunate but not surprising. So this will make sure that 2247 * we're using a single buffer that is a multiple of the sector size. 2248 */ 2249 if ((double_buf_needed != 0) 2250 && (data->sg_count > 1) 2251 && (write_dev != 0)) { 2252 uint32_t cur_offset; 2253 int i; 2254 2255 if (file_dev->tmp_buf == NULL) 2256 file_dev->tmp_buf = calloc(dev->blocksize, 1); 2257 if (file_dev->tmp_buf == NULL) { 2258 buf->status = CAMDD_STATUS_ERROR; 2259 error_count++; 2260 pthread_mutex_lock(&dev->mutex); 2261 goto bailout; 2262 } 2263 for (i = 0, cur_offset = 0; i < data->sg_count; i++) { 2264 bcopy(data->iovec[i].iov_base, 2265 &file_dev->tmp_buf[cur_offset], 2266 data->iovec[i].iov_len); 2267 cur_offset += data->iovec[i].iov_len; 2268 } 2269 db_len = cur_offset; 2270 } 2271 2272 if (file_dev->file_flags & CAMDD_FF_CAN_SEEK) { 2273 if (write_dev == 0) { 2274 /* 2275 * XXX KDM is there any way we would need a S/G 2276 * list here? 2277 */ 2278 retval = pread(file_dev->fd, data->buf, 2279 buf->len, io_offset); 2280 } else { 2281 if (double_buf_needed != 0) { 2282 retval = pwrite(file_dev->fd, file_dev->tmp_buf, 2283 db_len, io_offset); 2284 } else if (data->sg_count == 0) { 2285 retval = pwrite(file_dev->fd, data->buf, 2286 data->fill_len, io_offset); 2287 } else { 2288 retval = pwritev(file_dev->fd, data->iovec, 2289 data->sg_count, io_offset); 2290 } 2291 } 2292 } else { 2293 if (write_dev == 0) { 2294 /* 2295 * XXX KDM is there any way we would need a S/G 2296 * list here? 
2297 */ 2298 retval = read(file_dev->fd, data->buf, buf->len); 2299 } else { 2300 if (double_buf_needed != 0) { 2301 retval = write(file_dev->fd, file_dev->tmp_buf, 2302 db_len); 2303 } else if (data->sg_count == 0) { 2304 retval = write(file_dev->fd, data->buf, 2305 data->fill_len); 2306 } else { 2307 retval = writev(file_dev->fd, data->iovec, 2308 data->sg_count); 2309 } 2310 } 2311 } 2312 2313 /* We're done, re-acquire the lock */ 2314 pthread_mutex_lock(&dev->mutex); 2315 2316 if (retval >= (ssize_t)data->fill_len) { 2317 /* 2318 * If the bytes transferred is more than the request size, 2319 * that indicates an overrun, which should only happen at 2320 * the end of a transfer if we have to round up to a sector 2321 * boundary. 2322 */ 2323 if (buf->status == CAMDD_STATUS_NONE) 2324 buf->status = CAMDD_STATUS_OK; 2325 data->resid = 0; 2326 dev->bytes_transferred += retval; 2327 } else if (retval == -1) { 2328 warn("Error %s %s", (write_dev) ? "writing to" : 2329 "reading from", file_dev->filename); 2330 2331 buf->status = CAMDD_STATUS_ERROR; 2332 data->resid = data->fill_len; 2333 error_count++; 2334 2335 if (dev->debug == 0) 2336 goto bailout; 2337 2338 if ((double_buf_needed != 0) 2339 && (write_dev != 0)) { 2340 fprintf(stderr, "%s: fd %d, DB buf %p, len %u lba %ju " 2341 "offset %ju\n", __func__, file_dev->fd, 2342 file_dev->tmp_buf, db_len, (uintmax_t)buf->lba, 2343 (uintmax_t)io_offset); 2344 } else if (data->sg_count == 0) { 2345 fprintf(stderr, "%s: fd %d, buf %p, len %u, lba %ju " 2346 "offset %ju\n", __func__, file_dev->fd, data->buf, 2347 data->fill_len, (uintmax_t)buf->lba, 2348 (uintmax_t)io_offset); 2349 } else { 2350 int i; 2351 2352 fprintf(stderr, "%s: fd %d, len %u, lba %ju " 2353 "offset %ju\n", __func__, file_dev->fd, 2354 data->fill_len, (uintmax_t)buf->lba, 2355 (uintmax_t)io_offset); 2356 2357 for (i = 0; i < data->sg_count; i++) { 2358 fprintf(stderr, "index %d ptr %p len %zu\n", 2359 i, data->iovec[i].iov_base, 2360 data->iovec[i].iov_len); 2361 } 2362 } 2363 } else if (retval == 0) { 2364 buf->status = CAMDD_STATUS_EOF; 2365 if (dev->debug != 0) 2366 printf("%s: got EOF from %s!\n", __func__, 2367 file_dev->filename); 2368 data->resid = data->fill_len; 2369 error_count++; 2370 } else if (retval < (ssize_t)data->fill_len) { 2371 if (buf->status == CAMDD_STATUS_NONE) 2372 buf->status = CAMDD_STATUS_SHORT_IO; 2373 data->resid = data->fill_len - retval; 2374 dev->bytes_transferred += retval; 2375 } 2376 2377 bailout: 2378 if (buf != NULL) { 2379 if (buf->status == CAMDD_STATUS_EOF) { 2380 struct camdd_buf *buf2; 2381 dev->flags |= CAMDD_DEV_FLAG_EOF; 2382 STAILQ_FOREACH(buf2, &dev->run_queue, links) 2383 buf2->status = CAMDD_STATUS_EOF; 2384 } 2385 2386 camdd_complete_buf(dev, buf, &error_count); 2387 } 2388 2389 if (error_count != 0) 2390 return (-1); 2391 else if (no_resources != 0) 2392 return (1); 2393 else 2394 return (0); 2395 } 2396 2397 /* 2398 * Execute one command from the run queue. Returns 0 for success, 1 for 2399 * stop processing, and -1 for error. 
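 * The buffer's CCB is built with scsi_read_write() and handed to the
 * pass(4) driver via the CAMIOQUEUE ioctl; the completion is collected
 * later by camdd_pass_fetch() using CAMIOGET.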
2400 */ 2401 int 2402 camdd_pass_run(struct camdd_dev *dev) 2403 { 2404 struct camdd_buf *buf = NULL; 2405 struct camdd_dev_pass *pass_dev = &dev->dev_spec.pass; 2406 struct camdd_buf_data *data; 2407 uint32_t num_blocks, sectors_used = 0; 2408 union ccb *ccb; 2409 int retval = 0, is_write = dev->write_dev; 2410 int double_buf_needed = 0; 2411 2412 buf = STAILQ_FIRST(&dev->run_queue); 2413 if (buf == NULL) { 2414 retval = 1; 2415 goto bailout; 2416 } 2417 2418 /* 2419 * If we're writing, we need to go through the source buffer list 2420 * and create an S/G list. 2421 */ 2422 if (is_write != 0) { 2423 retval = camdd_buf_sg_create(buf, /*iovec*/ 0, dev->sector_size, 2424 &sectors_used, &double_buf_needed); 2425 if (retval != 0) { 2426 retval = -1; 2427 goto bailout; 2428 } 2429 } 2430 2431 STAILQ_REMOVE(&dev->run_queue, buf, camdd_buf, links); 2432 dev->num_run_queue--; 2433 2434 data = &buf->buf_type_spec.data; 2435 2436 ccb = &data->ccb; 2437 CCB_CLEAR_ALL_EXCEPT_HDR(&ccb->csio); 2438 2439 /* 2440 * In almost every case the number of blocks should be the device 2441 * block size. The exception may be at the end of an I/O stream 2442 * for a partial block or at the end of a device. 2443 */ 2444 if (is_write != 0) 2445 num_blocks = sectors_used; 2446 else 2447 num_blocks = data->fill_len / pass_dev->block_len; 2448 2449 scsi_read_write(&ccb->csio, 2450 /*retries*/ dev->retry_count, 2451 /*cbfcnp*/ NULL, 2452 /*tag_action*/ MSG_SIMPLE_Q_TAG, 2453 /*readop*/ (dev->write_dev == 0) ? SCSI_RW_READ : 2454 SCSI_RW_WRITE, 2455 /*byte2*/ 0, 2456 /*minimum_cmd_size*/ dev->min_cmd_size, 2457 /*lba*/ buf->lba, 2458 /*block_count*/ num_blocks, 2459 /*data_ptr*/ (data->sg_count != 0) ? 2460 (uint8_t *)data->segs : data->buf, 2461 /*dxfer_len*/ (num_blocks * pass_dev->block_len), 2462 /*sense_len*/ SSD_FULL_SIZE, 2463 /*timeout*/ dev->io_timeout); 2464 2465 /* Disable freezing the device queue */ 2466 ccb->ccb_h.flags |= CAM_DEV_QFRZDIS; 2467 2468 if (dev->retry_count != 0) 2469 ccb->ccb_h.flags |= CAM_PASS_ERR_RECOVER; 2470 2471 if (data->sg_count != 0) { 2472 ccb->csio.sglist_cnt = data->sg_count; 2473 ccb->ccb_h.flags |= CAM_DATA_SG; 2474 } 2475 2476 /* 2477 * Store a pointer to the buffer in the CCB. The kernel will 2478 * restore this when we get it back, and we'll use it to identify 2479 * the buffer this CCB came from. 2480 */ 2481 ccb->ccb_h.ccb_buf = buf; 2482 2483 /* 2484 * Unlock our mutex in preparation for issuing the ioctl. 2485 */ 2486 pthread_mutex_unlock(&dev->mutex); 2487 /* 2488 * Queue the CCB to the pass(4) driver. 2489 */ 2490 if (ioctl(pass_dev->dev->fd, CAMIOQUEUE, ccb) == -1) { 2491 pthread_mutex_lock(&dev->mutex); 2492 2493 warn("%s: error sending CAMIOQUEUE ioctl to %s%u", __func__, 2494 pass_dev->dev->device_name, pass_dev->dev->dev_unit_num); 2495 warn("%s: CCB address is %p", __func__, ccb); 2496 retval = -1; 2497 2498 STAILQ_INSERT_TAIL(&dev->free_queue, buf, links); 2499 } else { 2500 pthread_mutex_lock(&dev->mutex); 2501 2502 dev->cur_active_io++; 2503 STAILQ_INSERT_TAIL(&dev->active_queue, buf, links); 2504 } 2505 2506 bailout: 2507 return (retval); 2508 } 2509 2510 int 2511 camdd_get_next_lba_len(struct camdd_dev *dev, uint64_t *lba, ssize_t *len) 2512 { 2513 struct camdd_dev_pass *pass_dev; 2514 uint32_t num_blocks; 2515 int retval = 0; 2516 2517 pass_dev = &dev->dev_spec.pass; 2518 2519 *lba = dev->next_io_pos_bytes / dev->sector_size; 2520 *len = dev->blocksize; 2521 num_blocks = *len / dev->sector_size; 2522 2523 /* 2524 * If max_sector is 0, then we have no set limit.
This can happen 2525 * if we're writing to a file in a filesystem, or reading from 2526 * something like /dev/zero. 2527 */ 2528 if ((dev->max_sector != 0) 2529 || (dev->sector_io_limit != 0)) { 2530 uint64_t max_sector; 2531 2532 if ((dev->max_sector != 0) 2533 && (dev->sector_io_limit != 0)) 2534 max_sector = min(dev->sector_io_limit, dev->max_sector); 2535 else if (dev->max_sector != 0) 2536 max_sector = dev->max_sector; 2537 else 2538 max_sector = dev->sector_io_limit; 2539 2540 2541 /* 2542 * Check to see whether we're starting off past the end of 2543 * the device. If so, we need to just send an EOF 2544 * notification to the writer. 2545 */ 2546 if (*lba > max_sector) { 2547 *len = 0; 2548 retval = 1; 2549 } else if (((*lba + num_blocks) > max_sector + 1) 2550 || ((*lba + num_blocks) < *lba)) { 2551 /* 2552 * If we get here (but pass the first check), we 2553 * can trim the request length down to go to the 2554 * end of the device. 2555 */ 2556 num_blocks = (max_sector + 1) - *lba; 2557 *len = num_blocks * dev->sector_size; 2558 retval = 1; 2559 } 2560 } 2561 2562 dev->next_io_pos_bytes += *len; 2563 2564 return (retval); 2565 } 2566 2567 /* 2568 * Returns 0 for success, 1 for EOF detected, and -1 for failure. 2569 */ 2570 int 2571 camdd_queue(struct camdd_dev *dev, struct camdd_buf *read_buf) 2572 { 2573 struct camdd_buf *buf = NULL; 2574 struct camdd_buf_data *data; 2575 struct camdd_dev_pass *pass_dev; 2576 size_t new_len; 2577 struct camdd_buf_data *rb_data; 2578 int is_write = dev->write_dev; 2579 int eof_flush_needed = 0; 2580 int retval = 0; 2581 int error; 2582 2583 pass_dev = &dev->dev_spec.pass; 2584 2585 /* 2586 * If we've gotten EOF or our partner has, we should not continue 2587 * queueing I/O. If we're a writer, though, we should continue 2588 * to write any buffers that don't have EOF status. 2589 */ 2590 if ((dev->flags & CAMDD_DEV_FLAG_EOF) 2591 || ((dev->flags & CAMDD_DEV_FLAG_PEER_EOF) 2592 && (is_write == 0))) { 2593 /* 2594 * Tell the worker thread that we have seen EOF. 2595 */ 2596 retval = 1; 2597 2598 /* 2599 * If we're the writer, send the buffer back with EOF status. 2600 */ 2601 if (is_write) { 2602 read_buf->status = CAMDD_STATUS_EOF; 2603 2604 error = camdd_complete_peer_buf(dev, read_buf); 2605 } 2606 goto bailout; 2607 } 2608 2609 if (is_write == 0) { 2610 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2611 if (buf == NULL) { 2612 retval = -1; 2613 goto bailout; 2614 } 2615 data = &buf->buf_type_spec.data; 2616 2617 retval = camdd_get_next_lba_len(dev, &buf->lba, &buf->len); 2618 if (retval != 0) { 2619 buf->status = CAMDD_STATUS_EOF; 2620 2621 if ((buf->len == 0) 2622 && ((dev->flags & (CAMDD_DEV_FLAG_EOF_SENT | 2623 CAMDD_DEV_FLAG_EOF_QUEUED)) != 0)) { 2624 camdd_release_buf(buf); 2625 goto bailout; 2626 } 2627 dev->flags |= CAMDD_DEV_FLAG_EOF_QUEUED; 2628 } 2629 2630 data->fill_len = buf->len; 2631 data->src_start_offset = buf->lba * dev->sector_size; 2632 2633 /* 2634 * Put this on the run queue. 2635 */ 2636 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2637 dev->num_run_queue++; 2638 2639 /* We're done. */ 2640 goto bailout; 2641 } 2642 2643 /* 2644 * Check for new EOF status from the reader. 
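 * An EOF or error status from the reader means that any partially
 * assembled write buffer still sitting on the pending queue has to be
 * flushed out before we stop.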
2645 */ 2646 if ((read_buf->status == CAMDD_STATUS_EOF) 2647 || (read_buf->status == CAMDD_STATUS_ERROR)) { 2648 dev->flags |= CAMDD_DEV_FLAG_PEER_EOF; 2649 if ((STAILQ_FIRST(&dev->pending_queue) == NULL) 2650 && (read_buf->len == 0)) { 2651 camdd_complete_peer_buf(dev, read_buf); 2652 retval = 1; 2653 goto bailout; 2654 } else 2655 eof_flush_needed = 1; 2656 } 2657 2658 /* 2659 * See if we have a buffer we're composing with pieces from our 2660 * partner thread. 2661 */ 2662 buf = STAILQ_FIRST(&dev->pending_queue); 2663 if (buf == NULL) { 2664 uint64_t lba; 2665 ssize_t len; 2666 2667 retval = camdd_get_next_lba_len(dev, &lba, &len); 2668 if (retval != 0) { 2669 read_buf->status = CAMDD_STATUS_EOF; 2670 2671 if (len == 0) { 2672 dev->flags |= CAMDD_DEV_FLAG_EOF; 2673 error = camdd_complete_peer_buf(dev, read_buf); 2674 goto bailout; 2675 } 2676 } 2677 2678 /* 2679 * If we don't have a pending buffer, we need to grab a new 2680 * one from the free list or allocate another one. 2681 */ 2682 buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2683 if (buf == NULL) { 2684 retval = 1; 2685 goto bailout; 2686 } 2687 2688 buf->lba = lba; 2689 buf->len = len; 2690 2691 STAILQ_INSERT_TAIL(&dev->pending_queue, buf, links); 2692 dev->num_pending_queue++; 2693 } 2694 2695 data = &buf->buf_type_spec.data; 2696 2697 rb_data = &read_buf->buf_type_spec.data; 2698 2699 if ((rb_data->src_start_offset != dev->next_peer_pos_bytes) 2700 && (dev->debug != 0)) { 2701 printf("%s: WARNING: reader offset %#jx != expected offset " 2702 "%#jx\n", __func__, (uintmax_t)rb_data->src_start_offset, 2703 (uintmax_t)dev->next_peer_pos_bytes); 2704 } 2705 dev->next_peer_pos_bytes = rb_data->src_start_offset + 2706 (rb_data->fill_len - rb_data->resid); 2707 2708 new_len = (rb_data->fill_len - rb_data->resid) + data->fill_len; 2709 if (new_len < buf->len) { 2710 /* 2711 * There are three cases here: 2712 * 1. We need more data to fill up a block, so we put 2713 * this I/O on the queue and wait for more I/O. 2714 * 2. We have a pending buffer in the queue that is 2715 * smaller than our blocksize, but we got an EOF. So we 2716 * need to go ahead and flush the write out. 2717 * 3. We got an error. 2718 */ 2719 2720 /* 2721 * Increment our fill length. 2722 */ 2723 data->fill_len += (rb_data->fill_len - rb_data->resid); 2724 2725 /* 2726 * Add the new read buffer to the list for writing. 2727 */ 2728 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2729 2730 /* Increment the count */ 2731 buf->src_count++; 2732 2733 if (eof_flush_needed == 0) { 2734 /* 2735 * We need to exit, because we don't have enough 2736 * data yet. 2737 */ 2738 goto bailout; 2739 } else { 2740 /* 2741 * Take the buffer off of the pending queue. 2742 */ 2743 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2744 links); 2745 dev->num_pending_queue--; 2746 2747 /* 2748 * If we need an EOF flush, but there is no data 2749 * to flush, go ahead and return this buffer. 2750 */ 2751 if (data->fill_len == 0) { 2752 camdd_complete_buf(dev, buf, /*error_count*/0); 2753 retval = 1; 2754 goto bailout; 2755 } 2756 2757 /* 2758 * Put this on the next queue for execution. 2759 */ 2760 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2761 dev->num_run_queue++; 2762 } 2763 } else if (new_len == buf->len) { 2764 /* 2765 * We have enough data to completely fill one block, 2766 * so we're ready to issue the I/O. 2767 */ 2768 2769 /* 2770 * Take the buffer off of the pending queue.
2771 */ 2772 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, links); 2773 dev->num_pending_queue--; 2774 2775 /* 2776 * Add the new read buffer to the list for writing. 2777 */ 2778 STAILQ_INSERT_TAIL(&buf->src_list, read_buf, src_links); 2779 2780 /* Increment the count */ 2781 buf->src_count++; 2782 2783 /* 2784 * Increment our fill length. 2785 */ 2786 data->fill_len += (rb_data->fill_len - rb_data->resid); 2787 2788 /* 2789 * Put this on the next queue for execution. 2790 */ 2791 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2792 dev->num_run_queue++; 2793 } else { 2794 struct camdd_buf *idb; 2795 struct camdd_buf_indirect *indirect; 2796 uint32_t len_to_go, cur_offset; 2797 2798 2799 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2800 if (idb == NULL) { 2801 retval = 1; 2802 goto bailout; 2803 } 2804 indirect = &idb->buf_type_spec.indirect; 2805 indirect->src_buf = read_buf; 2806 read_buf->refcount++; 2807 indirect->offset = 0; 2808 indirect->start_ptr = rb_data->buf; 2809 /* 2810 * We've already established that there is more 2811 * data in read_buf than we have room for in our 2812 * current write request. So this particular chunk 2813 * of the request should just be the remainder 2814 * needed to fill up a block. 2815 */ 2816 indirect->len = buf->len - (data->fill_len - data->resid); 2817 2818 camdd_buf_add_child(buf, idb); 2819 2820 /* 2821 * This buffer is ready to execute, so we can take 2822 * it off the pending queue and put it on the run 2823 * queue. 2824 */ 2825 STAILQ_REMOVE(&dev->pending_queue, buf, camdd_buf, 2826 links); 2827 dev->num_pending_queue--; 2828 STAILQ_INSERT_TAIL(&dev->run_queue, buf, links); 2829 dev->num_run_queue++; 2830 2831 cur_offset = indirect->offset + indirect->len; 2832 2833 /* 2834 * The resulting I/O would be too large to fit in 2835 * one block. We need to split this I/O into 2836 * multiple pieces. Allocate as many buffers as needed. 2837 */ 2838 for (len_to_go = rb_data->fill_len - rb_data->resid - 2839 indirect->len; len_to_go > 0;) { 2840 struct camdd_buf *new_buf; 2841 struct camdd_buf_data *new_data; 2842 uint64_t lba; 2843 ssize_t len; 2844 2845 retval = camdd_get_next_lba_len(dev, &lba, &len); 2846 if ((retval != 0) 2847 && (len == 0)) { 2848 /* 2849 * The device has already been marked 2850 * as EOF, and there is no space left. 
2851 */ 2852 goto bailout; 2853 } 2854 2855 new_buf = camdd_get_buf(dev, CAMDD_BUF_DATA); 2856 if (new_buf == NULL) { 2857 retval = 1; 2858 goto bailout; 2859 } 2860 2861 new_buf->lba = lba; 2862 new_buf->len = len; 2863 2864 idb = camdd_get_buf(dev, CAMDD_BUF_INDIRECT); 2865 if (idb == NULL) { 2866 retval = 1; 2867 goto bailout; 2868 } 2869 2870 indirect = &idb->buf_type_spec.indirect; 2871 2872 indirect->src_buf = read_buf; 2873 read_buf->refcount++; 2874 indirect->offset = cur_offset; 2875 indirect->start_ptr = rb_data->buf + cur_offset; 2876 indirect->len = min(len_to_go, new_buf->len); 2877 #if 0 2878 if (((indirect->len % dev->sector_size) != 0) 2879 || ((indirect->offset % dev->sector_size) != 0)) { 2880 warnx("offset %ju len %ju not aligned with " 2881 "sector size %u", indirect->offset, 2882 (uintmax_t)indirect->len, dev->sector_size); 2883 } 2884 #endif 2885 cur_offset += indirect->len; 2886 len_to_go -= indirect->len; 2887 2888 camdd_buf_add_child(new_buf, idb); 2889 2890 new_data = &new_buf->buf_type_spec.data; 2891 2892 if ((new_data->fill_len == new_buf->len) 2893 || (eof_flush_needed != 0)) { 2894 STAILQ_INSERT_TAIL(&dev->run_queue, 2895 new_buf, links); 2896 dev->num_run_queue++; 2897 } else if (new_data->fill_len < buf->len) { 2898 STAILQ_INSERT_TAIL(&dev->pending_queue, 2899 new_buf, links); 2900 dev->num_pending_queue++; 2901 } else { 2902 warnx("%s: too much data in new " 2903 "buffer!", __func__); 2904 retval = 1; 2905 goto bailout; 2906 } 2907 } 2908 } 2909 2910 bailout: 2911 return (retval); 2912 } 2913 2914 void 2915 camdd_get_depth(struct camdd_dev *dev, uint32_t *our_depth, 2916 uint32_t *peer_depth, uint32_t *our_bytes, uint32_t *peer_bytes) 2917 { 2918 *our_depth = dev->cur_active_io + dev->num_run_queue; 2919 if (dev->num_peer_work_queue > 2920 dev->num_peer_done_queue) 2921 *peer_depth = dev->num_peer_work_queue - 2922 dev->num_peer_done_queue; 2923 else 2924 *peer_depth = 0; 2925 *our_bytes = *our_depth * dev->blocksize; 2926 *peer_bytes = dev->peer_bytes_queued; 2927 } 2928 2929 void 2930 camdd_sig_handler(int sig) 2931 { 2932 if (sig == SIGINFO) 2933 need_status = 1; 2934 else { 2935 need_exit = 1; 2936 error_exit = 1; 2937 } 2938 2939 sem_post(&camdd_sem); 2940 } 2941 2942 void 2943 camdd_print_status(struct camdd_dev *camdd_dev, struct camdd_dev *other_dev, 2944 struct timespec *start_time) 2945 { 2946 struct timespec done_time; 2947 uint64_t total_ns; 2948 long double mb_sec, total_sec; 2949 int error = 0; 2950 2951 error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &done_time); 2952 if (error != 0) { 2953 warn("Unable to get done time"); 2954 return; 2955 } 2956 2957 timespecsub(&done_time, start_time); 2958 2959 total_ns = done_time.tv_nsec + (done_time.tv_sec * 1000000000); 2960 total_sec = total_ns; 2961 total_sec /= 1000000000; 2962 2963 fprintf(stderr, "%ju bytes %s %s\n%ju bytes %s %s\n" 2964 "%.4Lf seconds elapsed\n", 2965 (uintmax_t)camdd_dev->bytes_transferred, 2966 (camdd_dev->write_dev == 0) ? "read from" : "written to", 2967 camdd_dev->device_name, 2968 (uintmax_t)other_dev->bytes_transferred, 2969 (other_dev->write_dev == 0) ? 
"read from" : "written to", 2970 other_dev->device_name, total_sec); 2971 2972 mb_sec = min(other_dev->bytes_transferred,camdd_dev->bytes_transferred); 2973 mb_sec /= 1024 * 1024; 2974 mb_sec *= 1000000000; 2975 mb_sec /= total_ns; 2976 fprintf(stderr, "%.2Lf MB/sec\n", mb_sec); 2977 } 2978 2979 int 2980 camdd_rw(struct camdd_io_opts *io_opts, int num_io_opts, uint64_t max_io, 2981 int retry_count, int timeout) 2982 { 2983 struct cam_device *new_cam_dev = NULL; 2984 struct camdd_dev *devs[2]; 2985 struct timespec start_time; 2986 pthread_t threads[2]; 2987 int unit = 0; 2988 int error = 0; 2989 int i; 2990 2991 if (num_io_opts != 2) { 2992 warnx("Must have one input and one output path"); 2993 error = 1; 2994 goto bailout; 2995 } 2996 2997 bzero(devs, sizeof(devs)); 2998 2999 for (i = 0; i < num_io_opts; i++) { 3000 switch (io_opts[i].dev_type) { 3001 case CAMDD_DEV_PASS: { 3002 if (isdigit(io_opts[i].dev_name[0])) { 3003 camdd_argmask new_arglist = CAMDD_ARG_NONE; 3004 int bus = 0, target = 0, lun = 0; 3005 int rv; 3006 3007 /* device specified as bus:target[:lun] */ 3008 rv = parse_btl(io_opts[i].dev_name, &bus, 3009 &target, &lun, &new_arglist); 3010 if (rv < 2) { 3011 warnx("numeric device specification " 3012 "must be either bus:target, or " 3013 "bus:target:lun"); 3014 error = 1; 3015 goto bailout; 3016 } 3017 /* default to 0 if lun was not specified */ 3018 if ((new_arglist & CAMDD_ARG_LUN) == 0) { 3019 lun = 0; 3020 new_arglist |= CAMDD_ARG_LUN; 3021 } 3022 new_cam_dev = cam_open_btl(bus, target, lun, 3023 O_RDWR, NULL); 3024 } else { 3025 char name[30]; 3026 3027 if (cam_get_device(io_opts[i].dev_name, name, 3028 sizeof name, &unit) == -1) { 3029 warnx("%s", cam_errbuf); 3030 error = 1; 3031 goto bailout; 3032 } 3033 new_cam_dev = cam_open_spec_device(name, unit, 3034 O_RDWR, NULL); 3035 } 3036 3037 if (new_cam_dev == NULL) { 3038 warnx("%s", cam_errbuf); 3039 error = 1; 3040 goto bailout; 3041 } 3042 3043 devs[i] = camdd_probe_pass(new_cam_dev, 3044 /*io_opts*/ &io_opts[i], 3045 CAMDD_ARG_ERR_RECOVER, 3046 /*probe_retry_count*/ 3, 3047 /*probe_timeout*/ 5000, 3048 /*io_retry_count*/ retry_count, 3049 /*io_timeout*/ timeout); 3050 if (devs[i] == NULL) { 3051 warn("Unable to probe device %s%u", 3052 new_cam_dev->device_name, 3053 new_cam_dev->dev_unit_num); 3054 error = 1; 3055 goto bailout; 3056 } 3057 break; 3058 } 3059 case CAMDD_DEV_FILE: { 3060 int fd = -1; 3061 3062 if (io_opts[i].dev_name[0] == '-') { 3063 if (io_opts[i].write_dev != 0) 3064 fd = STDOUT_FILENO; 3065 else 3066 fd = STDIN_FILENO; 3067 } else { 3068 if (io_opts[i].write_dev != 0) { 3069 fd = open(io_opts[i].dev_name, 3070 O_RDWR | O_CREAT, S_IWUSR |S_IRUSR); 3071 } else { 3072 fd = open(io_opts[i].dev_name, 3073 O_RDONLY); 3074 } 3075 } 3076 if (fd == -1) { 3077 warn("error opening file %s", 3078 io_opts[i].dev_name); 3079 error = 1; 3080 goto bailout; 3081 } 3082 3083 devs[i] = camdd_probe_file(fd, &io_opts[i], 3084 retry_count, timeout); 3085 if (devs[i] == NULL) { 3086 error = 1; 3087 goto bailout; 3088 } 3089 3090 break; 3091 } 3092 default: 3093 warnx("Unknown device type %d (%s)", 3094 io_opts[i].dev_type, io_opts[i].dev_name); 3095 error = 1; 3096 goto bailout; 3097 break; /*NOTREACHED */ 3098 } 3099 3100 devs[i]->write_dev = io_opts[i].write_dev; 3101 3102 devs[i]->start_offset_bytes = io_opts[i].offset; 3103 3104 if (max_io != 0) { 3105 devs[i]->sector_io_limit = 3106 (devs[i]->start_offset_bytes / 3107 devs[i]->sector_size) + 3108 (max_io / devs[i]->sector_size) - 1; 3109 } 3110 3111 
devs[i]->next_io_pos_bytes = devs[i]->start_offset_bytes; 3112 devs[i]->next_completion_pos_bytes =devs[i]->start_offset_bytes; 3113 } 3114 3115 devs[0]->peer_dev = devs[1]; 3116 devs[1]->peer_dev = devs[0]; 3117 devs[0]->next_peer_pos_bytes = devs[0]->peer_dev->next_io_pos_bytes; 3118 devs[1]->next_peer_pos_bytes = devs[1]->peer_dev->next_io_pos_bytes; 3119 3120 sem_init(&camdd_sem, /*pshared*/ 0, 0); 3121 3122 signal(SIGINFO, camdd_sig_handler); 3123 signal(SIGINT, camdd_sig_handler); 3124 3125 error = clock_gettime(CLOCK_MONOTONIC_PRECISE, &start_time); 3126 if (error != 0) { 3127 warn("Unable to get start time"); 3128 goto bailout; 3129 } 3130 3131 for (i = 0; i < num_io_opts; i++) { 3132 error = pthread_create(&threads[i], NULL, camdd_worker, 3133 (void *)devs[i]); 3134 if (error != 0) { 3135 warnc(error, "pthread_create() failed"); 3136 goto bailout; 3137 } 3138 } 3139 3140 for (;;) { 3141 if ((sem_wait(&camdd_sem) == -1) 3142 || (need_exit != 0)) { 3143 struct kevent ke; 3144 3145 for (i = 0; i < num_io_opts; i++) { 3146 EV_SET(&ke, (uintptr_t)&devs[i]->work_queue, 3147 EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL); 3148 3149 devs[i]->flags |= CAMDD_DEV_FLAG_EOF; 3150 3151 error = kevent(devs[i]->kq, &ke, 1, NULL, 0, 3152 NULL); 3153 if (error == -1) 3154 warn("%s: unable to wake up thread", 3155 __func__); 3156 error = 0; 3157 } 3158 break; 3159 } else if (need_status != 0) { 3160 camdd_print_status(devs[0], devs[1], &start_time); 3161 need_status = 0; 3162 } 3163 } 3164 for (i = 0; i < num_io_opts; i++) { 3165 pthread_join(threads[i], NULL); 3166 } 3167 3168 camdd_print_status(devs[0], devs[1], &start_time); 3169 3170 bailout: 3171 3172 for (i = 0; i < num_io_opts; i++) 3173 camdd_free_dev(devs[i]); 3174 3175 return (error + error_exit); 3176 } 3177 3178 void 3179 usage(void) 3180 { 3181 fprintf(stderr, 3182 "usage: camdd <-i|-o pass=pass0,bs=1M,offset=1M,depth=4>\n" 3183 " <-i|-o file=/tmp/file,bs=512K,offset=1M>\n" 3184 " <-i|-o file=/dev/da0,bs=512K,offset=1M>\n" 3185 " <-i|-o file=/dev/nsa0,bs=512K>\n" 3186 " [-C retry_count][-E][-m max_io_amt][-t timeout_secs][-v][-h]\n" 3187 "Option description\n" 3188 "-i <arg=val> Specify input device/file and parameters\n" 3189 "-o <arg=val> Specify output device/file and parameters\n" 3190 "Input and Output parameters\n" 3191 "pass=name Specify a pass(4) device like pass0 or /dev/pass0\n" 3192 "file=name Specify a file or device, /tmp/foo, /dev/da0, /dev/null\n" 3193 " or - for stdin/stdout\n" 3194 "bs=blocksize Specify blocksize in bytes, or using K, M, G, etc. suffix\n" 3195 "offset=len Specify starting offset in bytes or using K, M, G suffix\n" 3196 " NOTE: offset cannot be specified on tapes, pipes, stdin/out\n" 3197 "depth=N Specify a numeric queue depth. This only applies to pass(4)\n" 3198 "mcs=N Specify a minimum cmd size for pass(4) read/write commands\n" 3199 "Optional arguments\n" 3200 "-C retry_cnt Specify a retry count for pass(4) devices\n" 3201 "-E Enable CAM error recovery for pass(4) devices\n" 3202 "-m max_io Specify the maximum amount to be transferred in bytes or\n" 3203 " using K, G, M, etc. 
suffixes\n" 3204 "-t timeout Specify the I/O timeout to use with pass(4) devices\n" 3205 "-v Enable verbose error recovery\n" 3206 "-h Print this message\n"); 3207 } 3208 3209 3210 int 3211 camdd_parse_io_opts(char *args, int is_write, struct camdd_io_opts *io_opts) 3212 { 3213 char *tmpstr, *tmpstr2; 3214 char *orig_tmpstr = NULL; 3215 int retval = 0; 3216 3217 io_opts->write_dev = is_write; 3218 3219 tmpstr = strdup(args); 3220 if (tmpstr == NULL) { 3221 warn("strdup failed"); 3222 retval = 1; 3223 goto bailout; 3224 } 3225 orig_tmpstr = tmpstr; 3226 while ((tmpstr2 = strsep(&tmpstr, ",")) != NULL) { 3227 char *name, *value; 3228 3229 /* 3230 * If the user creates an empty parameter by putting in two 3231 * commas, skip over it and look for the next field. 3232 */ 3233 if (*tmpstr2 == '\0') 3234 continue; 3235 3236 name = strsep(&tmpstr2, "="); 3237 if (*name == '\0') { 3238 warnx("Got empty I/O parameter name"); 3239 retval = 1; 3240 goto bailout; 3241 } 3242 value = strsep(&tmpstr2, "="); 3243 if ((value == NULL) 3244 || (*value == '\0')) { 3245 warnx("Empty I/O parameter value for %s", name); 3246 retval = 1; 3247 goto bailout; 3248 } 3249 if (strncasecmp(name, "file", 4) == 0) { 3250 io_opts->dev_type = CAMDD_DEV_FILE; 3251 io_opts->dev_name = strdup(value); 3252 if (io_opts->dev_name == NULL) { 3253 warn("Error allocating memory"); 3254 retval = 1; 3255 goto bailout; 3256 } 3257 } else if (strncasecmp(name, "pass", 4) == 0) { 3258 io_opts->dev_type = CAMDD_DEV_PASS; 3259 io_opts->dev_name = strdup(value); 3260 if (io_opts->dev_name == NULL) { 3261 warn("Error allocating memory"); 3262 retval = 1; 3263 goto bailout; 3264 } 3265 } else if ((strncasecmp(name, "bs", 2) == 0) 3266 || (strncasecmp(name, "blocksize", 9) == 0)) { 3267 retval = expand_number(value, &io_opts->blocksize); 3268 if (retval == -1) { 3269 warn("expand_number(3) failed on %s=%s", name, 3270 value); 3271 retval = 1; 3272 goto bailout; 3273 } 3274 } else if (strncasecmp(name, "depth", 5) == 0) { 3275 char *endptr; 3276 3277 io_opts->queue_depth = strtoull(value, &endptr, 0); 3278 if (*endptr != '\0') { 3279 warnx("invalid queue depth %s", value); 3280 retval = 1; 3281 goto bailout; 3282 } 3283 } else if (strncasecmp(name, "mcs", 3) == 0) { 3284 char *endptr; 3285 3286 io_opts->min_cmd_size = strtol(value, &endptr, 0); 3287 if ((*endptr != '\0') 3288 || ((io_opts->min_cmd_size > 16) 3289 || (io_opts->min_cmd_size < 0))) { 3290 warnx("invalid minimum cmd size %s", value); 3291 retval = 1; 3292 goto bailout; 3293 } 3294 } else if (strncasecmp(name, "offset", 6) == 0) { 3295 retval = expand_number(value, &io_opts->offset); 3296 if (retval == -1) { 3297 warn("expand_number(3) failed on %s=%s", name, 3298 value); 3299 retval = 1; 3300 goto bailout; 3301 } 3302 } else if (strncasecmp(name, "debug", 5) == 0) { 3303 char *endptr; 3304 3305 io_opts->debug = strtoull(value, &endptr, 0); 3306 if (*endptr != '\0') { 3307 warnx("invalid debug level %s", value); 3308 retval = 1; 3309 goto bailout; 3310 } 3311 } else { 3312 warnx("Unrecognized parameter %s=%s", name, value); 3313 } 3314 } 3315 bailout: 3316 free(orig_tmpstr); 3317 3318 return (retval); 3319 } 3320 3321 int 3322 main(int argc, char **argv) 3323 { 3324 int c; 3325 camdd_argmask arglist = CAMDD_ARG_NONE; 3326 int timeout = 0, retry_count = 1; 3327 int error = 0; 3328 uint64_t max_io = 0; 3329 struct camdd_io_opts *opt_list = NULL; 3330 3331 if (argc == 1) { 3332 usage(); 3333 exit(1); 3334 } 3335 3336 opt_list = calloc(2, sizeof(struct camdd_io_opts)); 3337 if 
(opt_list == NULL) { 3338 warn("Unable to allocate option list"); 3339 error = 1; 3340 goto bailout; 3341 } 3342 3343 while ((c = getopt(argc, argv, "C:Ehi:m:o:t:v")) != -1){ 3344 switch (c) { 3345 case 'C': 3346 retry_count = strtol(optarg, NULL, 0); 3347 if (retry_count < 0) 3348 errx(1, "retry count %d is < 0", 3349 retry_count); 3350 arglist |= CAMDD_ARG_RETRIES; 3351 break; 3352 case 'E': 3353 arglist |= CAMDD_ARG_ERR_RECOVER; 3354 break; 3355 case 'i': 3356 case 'o': 3357 if (((c == 'i') 3358 && (opt_list[0].dev_type != CAMDD_DEV_NONE)) 3359 || ((c == 'o') 3360 && (opt_list[1].dev_type != CAMDD_DEV_NONE))) { 3361 errx(1, "Only one input and output path " 3362 "allowed"); 3363 } 3364 error = camdd_parse_io_opts(optarg, (c == 'o') ? 1 : 0, 3365 (c == 'o') ? &opt_list[1] : &opt_list[0]); 3366 if (error != 0) 3367 goto bailout; 3368 break; 3369 case 'm': 3370 error = expand_number(optarg, &max_io); 3371 if (error == -1) { 3372 warn("invalid maximum I/O amount %s", optarg); 3373 error = 1; 3374 goto bailout; 3375 } 3376 break; 3377 case 't': 3378 timeout = strtol(optarg, NULL, 0); 3379 if (timeout < 0) 3380 errx(1, "invalid timeout %d", timeout); 3381 /* Convert the timeout from seconds to ms */ 3382 timeout *= 1000; 3383 arglist |= CAMDD_ARG_TIMEOUT; 3384 break; 3385 case 'v': 3386 arglist |= CAMDD_ARG_VERBOSE; 3387 break; 3388 case 'h': 3389 default: 3390 usage(); 3391 exit(1); 3392 break; /*NOTREACHED*/ 3393 } 3394 } 3395 3396 if ((opt_list[0].dev_type == CAMDD_DEV_NONE) 3397 || (opt_list[1].dev_type == CAMDD_DEV_NONE)) 3398 errx(1, "Must specify both -i and -o"); 3399 3400 /* 3401 * Set the timeout if the user hasn't specified one. 3402 */ 3403 if (timeout == 0) 3404 timeout = CAMDD_PASS_RW_TIMEOUT; 3405 3406 error = camdd_rw(opt_list, 2, max_io, retry_count, timeout); 3407 3408 bailout: 3409 free(opt_list); 3410 3411 exit(error); 3412 } 3413