1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37 /* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/types.h> 49 #include <sys/kthread.h> 50 #include <sys/bio.h> 51 #include <sys/fcntl.h> 52 #include <sys/limits.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/condvar.h> 56 #include <sys/malloc.h> 57 #include <sys/conf.h> 58 #include <sys/ioccom.h> 59 #include <sys/queue.h> 60 #include <sys/sbuf.h> 61 #include <sys/endian.h> 62 #include <sys/uio.h> 63 #include <sys/buf.h> 64 #include <sys/taskqueue.h> 65 #include <sys/vnode.h> 66 #include <sys/namei.h> 67 #include <sys/mount.h> 68 #include <sys/disk.h> 69 #include <sys/fcntl.h> 70 #include <sys/filedesc.h> 71 #include <sys/proc.h> 72 #include <sys/pcpu.h> 73 #include <sys/module.h> 74 #include <sys/sdt.h> 75 #include <sys/devicestat.h> 76 #include <sys/sysctl.h> 77 78 #include <geom/geom.h> 79 80 #include <cam/cam.h> 81 #include <cam/scsi/scsi_all.h> 82 #include <cam/scsi/scsi_da.h> 83 #include <cam/ctl/ctl_io.h> 84 #include <cam/ctl/ctl.h> 85 #include <cam/ctl/ctl_backend.h> 86 #include <cam/ctl/ctl_frontend_internal.h> 87 #include <cam/ctl/ctl_ioctl.h> 88 #include <cam/ctl/ctl_scsi_all.h> 89 #include <cam/ctl/ctl_error.h> 90 91 /* 92 * The idea here is that we'll allocate enough S/G space to hold a 1MB 93 * I/O. If we get an I/O larger than that, we'll split it. 94 */ 95 #define CTLBLK_MAX_IO_SIZE (1024 * 1024) 96 #define CTLBLK_MAX_SEG MAXPHYS 97 #define CTLBLK_MAX_SEGS MAX(CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG, 1) 98 99 #ifdef CTLBLK_DEBUG 100 #define DPRINTF(fmt, args...) \ 101 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 102 #else 103 #define DPRINTF(fmt, args...) do {} while(0) 104 #endif 105 106 SDT_PROVIDER_DEFINE(cbb); 107 108 typedef enum { 109 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 110 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 111 CTL_BE_BLOCK_LUN_WAITING = 0x04, 112 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 113 } ctl_be_block_lun_flags; 114 115 typedef enum { 116 CTL_BE_BLOCK_NONE, 117 CTL_BE_BLOCK_DEV, 118 CTL_BE_BLOCK_FILE 119 } ctl_be_block_type; 120 121 struct ctl_be_block_devdata { 122 struct cdev *cdev; 123 struct cdevsw *csw; 124 int dev_ref; 125 }; 126 127 struct ctl_be_block_filedata { 128 struct ucred *cred; 129 }; 130 131 union ctl_be_block_bedata { 132 struct ctl_be_block_devdata dev; 133 struct ctl_be_block_filedata file; 134 }; 135 136 struct ctl_be_block_io; 137 struct ctl_be_block_lun; 138 139 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 140 struct ctl_be_block_io *beio); 141 142 /* 143 * Backend LUN structure. There is a 1:1 mapping between a block device 144 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 145 */ 146 struct ctl_be_block_lun { 147 struct ctl_block_disk *disk; 148 char lunname[32]; 149 char *dev_path; 150 ctl_be_block_type dev_type; 151 struct vnode *vn; 152 union ctl_be_block_bedata backend; 153 cbb_dispatch_t dispatch; 154 cbb_dispatch_t lun_flush; 155 cbb_dispatch_t unmap; 156 struct mtx lock; 157 uma_zone_t lun_zone; 158 uint64_t size_blocks; 159 uint64_t size_bytes; 160 uint32_t blocksize; 161 int blocksize_shift; 162 uint16_t pblockexp; 163 uint16_t pblockoff; 164 struct ctl_be_block_softc *softc; 165 struct devstat *disk_stats; 166 ctl_be_block_lun_flags flags; 167 STAILQ_ENTRY(ctl_be_block_lun) links; 168 struct ctl_be_lun ctl_be_lun; 169 struct taskqueue *io_taskqueue; 170 struct task io_task; 171 int num_threads; 172 STAILQ_HEAD(, ctl_io_hdr) input_queue; 173 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 174 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 175 }; 176 177 /* 178 * Overall softc structure for the block backend module. 179 */ 180 struct ctl_be_block_softc { 181 struct mtx lock; 182 int num_disks; 183 STAILQ_HEAD(, ctl_block_disk) disk_list; 184 int num_luns; 185 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 186 }; 187 188 static struct ctl_be_block_softc backend_block_softc; 189 190 /* 191 * Per-I/O information. 192 */ 193 struct ctl_be_block_io { 194 union ctl_io *io; 195 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 196 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 197 int bio_cmd; 198 int bio_flags; 199 int num_segs; 200 int num_bios_sent; 201 int num_bios_done; 202 int send_complete; 203 int num_errors; 204 struct bintime ds_t0; 205 devstat_tag_type ds_tag_type; 206 devstat_trans_flags ds_trans_type; 207 uint64_t io_len; 208 uint64_t io_offset; 209 struct ctl_be_block_softc *softc; 210 struct ctl_be_block_lun *lun; 211 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 212 }; 213 214 static int cbb_num_threads = 14; 215 TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads); 216 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 217 "CAM Target Layer Block Backend"); 218 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW, 219 &cbb_num_threads, 0, "Number of threads per backing file"); 220 221 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 222 static void ctl_free_beio(struct ctl_be_block_io *beio); 223 static void ctl_complete_beio(struct ctl_be_block_io *beio); 224 static int ctl_be_block_move_done(union ctl_io *io); 225 static void ctl_be_block_biodone(struct bio *bio); 226 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 227 struct ctl_be_block_io *beio); 228 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 229 struct ctl_be_block_io *beio); 230 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 231 struct ctl_be_block_io *beio); 232 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 233 struct ctl_be_block_io *beio); 234 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 235 struct ctl_be_block_io *beio); 236 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 237 union ctl_io *io); 238 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 239 union ctl_io *io); 240 static void ctl_be_block_worker(void *context, int pending); 241 static int ctl_be_block_submit(union ctl_io *io); 242 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 243 int flag, struct thread *td); 244 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 245 struct ctl_lun_req *req); 246 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 247 struct ctl_lun_req *req); 248 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 249 static int ctl_be_block_open(struct ctl_be_block_softc *softc, 250 struct ctl_be_block_lun *be_lun, 251 struct ctl_lun_req *req); 252 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 253 struct ctl_lun_req *req); 254 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 255 struct ctl_lun_req *req); 256 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 257 struct ctl_lun_req *req); 258 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 259 struct ctl_lun_req *req); 260 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 261 struct ctl_lun_req *req); 262 static void ctl_be_block_lun_shutdown(void *be_lun); 263 static void ctl_be_block_lun_config_status(void *be_lun, 264 ctl_lun_config_status status); 265 static int ctl_be_block_config_write(union ctl_io *io); 266 static int ctl_be_block_config_read(union ctl_io *io); 267 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 268 int ctl_be_block_init(void); 269 270 static struct ctl_backend_driver ctl_be_block_driver = 271 { 272 .name = "block", 273 .flags = CTL_BE_FLAG_HAS_CONFIG, 274 .init = ctl_be_block_init, 275 .data_submit = ctl_be_block_submit, 276 .data_move_done = ctl_be_block_move_done, 277 .config_read = ctl_be_block_config_read, 278 .config_write = ctl_be_block_config_write, 279 .ioctl = ctl_be_block_ioctl, 280 .lun_info = ctl_be_block_lun_info 281 }; 282 283 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 284 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 285 286 static uma_zone_t beio_zone; 287 288 static struct ctl_be_block_io * 289 ctl_alloc_beio(struct ctl_be_block_softc *softc) 290 { 291 struct ctl_be_block_io *beio; 292 293 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 294 beio->softc = softc; 295 return (beio); 296 } 297 298 static void 299 ctl_free_beio(struct ctl_be_block_io *beio) 300 { 301 int duplicate_free; 302 int i; 303 304 duplicate_free = 0; 305 306 for (i = 0; i < beio->num_segs; i++) { 307 if (beio->sg_segs[i].addr == NULL) 308 duplicate_free++; 309 310 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 311 beio->sg_segs[i].addr = NULL; 312 } 313 314 if (duplicate_free > 0) { 315 printf("%s: %d duplicate frees out of %d segments\n", __func__, 316 duplicate_free, beio->num_segs); 317 } 318 319 uma_zfree(beio_zone, beio); 320 } 321 322 static void 323 ctl_complete_beio(struct ctl_be_block_io *beio) 324 { 325 union ctl_io *io; 326 int io_len; 327 328 io = beio->io; 329 330 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) 331 io_len = beio->io_len; 332 else 333 io_len = 0; 334 335 devstat_end_transaction(beio->lun->disk_stats, 336 /*bytes*/ io_len, 337 beio->ds_tag_type, 338 beio->ds_trans_type, 339 /*now*/ NULL, 340 /*then*/&beio->ds_t0); 341 342 if (beio->beio_cont != NULL) { 343 beio->beio_cont(beio); 344 } else { 345 ctl_free_beio(beio); 346 ctl_done(io); 347 } 348 } 349 350 static int 351 ctl_be_block_move_done(union ctl_io *io) 352 { 353 struct ctl_be_block_io *beio; 354 struct ctl_be_block_lun *be_lun; 355 #ifdef CTL_TIME_IO 356 struct bintime cur_bt; 357 #endif 358 359 beio = (struct ctl_be_block_io *) 360 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 361 362 be_lun = beio->lun; 363 364 DPRINTF("entered\n"); 365 366 #ifdef CTL_TIME_IO 367 getbintime(&cur_bt); 368 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 369 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 370 io->io_hdr.num_dmas++; 371 #endif 372 373 /* 374 * We set status at this point for read commands, and write 375 * commands with errors. 376 */ 377 if ((beio->bio_cmd == BIO_READ) 378 && (io->io_hdr.port_status == 0) 379 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 380 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 381 ctl_set_success(&io->scsiio); 382 else if ((io->io_hdr.port_status != 0) 383 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 384 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 385 /* 386 * For hardware error sense keys, the sense key 387 * specific value is defined to be a retry count, 388 * but we use it to pass back an internal FETD 389 * error code. XXX KDM Hopefully the FETD is only 390 * using 16 bits for an error code, since that's 391 * all the space we have in the sks field. 392 */ 393 ctl_set_internal_failure(&io->scsiio, 394 /*sks_valid*/ 1, 395 /*retry_count*/ 396 io->io_hdr.port_status); 397 } 398 399 /* 400 * If this is a read, or a write with errors, it is done. 401 */ 402 if ((beio->bio_cmd == BIO_READ) 403 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 404 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 405 ctl_complete_beio(beio); 406 return (0); 407 } 408 409 /* 410 * At this point, we have a write and the DMA completed 411 * successfully. We now have to queue it to the task queue to 412 * execute the backend I/O. That is because we do blocking 413 * memory allocations, and in the file backing case, blocking I/O. 414 * This move done routine is generally called in the SIM's 415 * interrupt context, and therefore we cannot block. 416 */ 417 mtx_lock(&be_lun->lock); 418 /* 419 * XXX KDM make sure that links is okay to use at this point. 420 * Otherwise, we either need to add another field to ctl_io_hdr, 421 * or deal with resource allocation here. 422 */ 423 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 424 mtx_unlock(&be_lun->lock); 425 426 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 427 428 return (0); 429 } 430 431 static void 432 ctl_be_block_biodone(struct bio *bio) 433 { 434 struct ctl_be_block_io *beio; 435 struct ctl_be_block_lun *be_lun; 436 union ctl_io *io; 437 int error; 438 439 beio = bio->bio_caller1; 440 be_lun = beio->lun; 441 io = beio->io; 442 443 DPRINTF("entered\n"); 444 445 error = bio->bio_error; 446 mtx_lock(&be_lun->lock); 447 if (error != 0) 448 beio->num_errors++; 449 450 beio->num_bios_done++; 451 452 /* 453 * XXX KDM will this cause WITNESS to complain? Holding a lock 454 * during the free might cause it to complain. 455 */ 456 g_destroy_bio(bio); 457 458 /* 459 * If the send complete bit isn't set, or we aren't the last I/O to 460 * complete, then we're done. 461 */ 462 if ((beio->send_complete == 0) 463 || (beio->num_bios_done < beio->num_bios_sent)) { 464 mtx_unlock(&be_lun->lock); 465 return; 466 } 467 468 /* 469 * At this point, we've verified that we are the last I/O to 470 * complete, so it's safe to drop the lock. 471 */ 472 mtx_unlock(&be_lun->lock); 473 474 /* 475 * If there are any errors from the backing device, we fail the 476 * entire I/O with a medium error. 477 */ 478 if (beio->num_errors > 0) { 479 if (error == EOPNOTSUPP) { 480 ctl_set_invalid_opcode(&io->scsiio); 481 } else if (beio->bio_cmd == BIO_FLUSH) { 482 /* XXX KDM is there is a better error here? */ 483 ctl_set_internal_failure(&io->scsiio, 484 /*sks_valid*/ 1, 485 /*retry_count*/ 0xbad2); 486 } else 487 ctl_set_medium_error(&io->scsiio); 488 ctl_complete_beio(beio); 489 return; 490 } 491 492 /* 493 * If this is a write, a flush or a delete, we're all done. 494 * If this is a read, we can now send the data to the user. 495 */ 496 if ((beio->bio_cmd == BIO_WRITE) 497 || (beio->bio_cmd == BIO_FLUSH) 498 || (beio->bio_cmd == BIO_DELETE)) { 499 ctl_set_success(&io->scsiio); 500 ctl_complete_beio(beio); 501 } else { 502 #ifdef CTL_TIME_IO 503 getbintime(&io->io_hdr.dma_start_bt); 504 #endif 505 ctl_datamove(io); 506 } 507 } 508 509 static void 510 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 511 struct ctl_be_block_io *beio) 512 { 513 union ctl_io *io; 514 struct mount *mountpoint; 515 int error, lock_flags; 516 517 DPRINTF("entered\n"); 518 519 io = beio->io; 520 521 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 522 523 if (MNT_SHARED_WRITES(mountpoint) 524 || ((mountpoint == NULL) 525 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 526 lock_flags = LK_SHARED; 527 else 528 lock_flags = LK_EXCLUSIVE; 529 530 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 531 532 binuptime(&beio->ds_t0); 533 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 534 535 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 536 VOP_UNLOCK(be_lun->vn, 0); 537 538 vn_finished_write(mountpoint); 539 540 if (error == 0) 541 ctl_set_success(&io->scsiio); 542 else { 543 /* XXX KDM is there is a better error here? */ 544 ctl_set_internal_failure(&io->scsiio, 545 /*sks_valid*/ 1, 546 /*retry_count*/ 0xbad1); 547 } 548 549 ctl_complete_beio(beio); 550 } 551 552 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 553 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 554 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 555 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 556 557 static void 558 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 559 struct ctl_be_block_io *beio) 560 { 561 struct ctl_be_block_filedata *file_data; 562 union ctl_io *io; 563 struct uio xuio; 564 struct iovec *xiovec; 565 int flags; 566 int error, i; 567 568 DPRINTF("entered\n"); 569 570 file_data = &be_lun->backend.file; 571 io = beio->io; 572 flags = beio->bio_flags; 573 574 if (beio->bio_cmd == BIO_READ) { 575 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 576 } else { 577 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 578 } 579 580 bzero(&xuio, sizeof(xuio)); 581 if (beio->bio_cmd == BIO_READ) 582 xuio.uio_rw = UIO_READ; 583 else 584 xuio.uio_rw = UIO_WRITE; 585 586 xuio.uio_offset = beio->io_offset; 587 xuio.uio_resid = beio->io_len; 588 xuio.uio_segflg = UIO_SYSSPACE; 589 xuio.uio_iov = beio->xiovecs; 590 xuio.uio_iovcnt = beio->num_segs; 591 xuio.uio_td = curthread; 592 593 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 594 xiovec->iov_base = beio->sg_segs[i].addr; 595 xiovec->iov_len = beio->sg_segs[i].len; 596 } 597 598 if (beio->bio_cmd == BIO_READ) { 599 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 600 601 binuptime(&beio->ds_t0); 602 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 603 604 /* 605 * UFS pays attention to IO_DIRECT for reads. If the 606 * DIRECTIO option is configured into the kernel, it calls 607 * ffs_rawread(). But that only works for single-segment 608 * uios with user space addresses. In our case, with a 609 * kernel uio, it still reads into the buffer cache, but it 610 * will just try to release the buffer from the cache later 611 * on in ffs_read(). 612 * 613 * ZFS does not pay attention to IO_DIRECT for reads. 614 * 615 * UFS does not pay attention to IO_SYNC for reads. 616 * 617 * ZFS pays attention to IO_SYNC (which translates into the 618 * Solaris define FRSYNC for zfs_read()) for reads. It 619 * attempts to sync the file before reading. 620 * 621 * So, to attempt to provide some barrier semantics in the 622 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 623 */ 624 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 625 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 626 627 VOP_UNLOCK(be_lun->vn, 0); 628 } else { 629 struct mount *mountpoint; 630 int lock_flags; 631 632 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 633 634 if (MNT_SHARED_WRITES(mountpoint) 635 || ((mountpoint == NULL) 636 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 637 lock_flags = LK_SHARED; 638 else 639 lock_flags = LK_EXCLUSIVE; 640 641 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 642 643 binuptime(&beio->ds_t0); 644 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 645 646 /* 647 * UFS pays attention to IO_DIRECT for writes. The write 648 * is done asynchronously. (Normally the write would just 649 * get put into cache. 650 * 651 * UFS pays attention to IO_SYNC for writes. It will 652 * attempt to write the buffer out synchronously if that 653 * flag is set. 654 * 655 * ZFS does not pay attention to IO_DIRECT for writes. 656 * 657 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 658 * for writes. It will flush the transaction from the 659 * cache before returning. 660 * 661 * So if we've got the BIO_ORDERED flag set, we want 662 * IO_SYNC in either the UFS or ZFS case. 663 */ 664 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 665 IO_SYNC : 0, file_data->cred); 666 VOP_UNLOCK(be_lun->vn, 0); 667 668 vn_finished_write(mountpoint); 669 } 670 671 /* 672 * If we got an error, set the sense data to "MEDIUM ERROR" and 673 * return the I/O to the user. 674 */ 675 if (error != 0) { 676 char path_str[32]; 677 678 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 679 /* 680 * XXX KDM ZFS returns ENOSPC when the underlying 681 * filesystem fills up. What kind of SCSI error should we 682 * return for that? 683 */ 684 printf("%s%s command returned errno %d\n", path_str, 685 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 686 ctl_set_medium_error(&io->scsiio); 687 ctl_complete_beio(beio); 688 return; 689 } 690 691 /* 692 * If this is a write, we're all done. 693 * If this is a read, we can now send the data to the user. 694 */ 695 if (beio->bio_cmd == BIO_WRITE) { 696 ctl_set_success(&io->scsiio); 697 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 698 ctl_complete_beio(beio); 699 } else { 700 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 701 #ifdef CTL_TIME_IO 702 getbintime(&io->io_hdr.dma_start_bt); 703 #endif 704 ctl_datamove(io); 705 } 706 } 707 708 static void 709 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 710 struct ctl_be_block_io *beio) 711 { 712 struct bio *bio; 713 union ctl_io *io; 714 struct ctl_be_block_devdata *dev_data; 715 716 dev_data = &be_lun->backend.dev; 717 io = beio->io; 718 719 DPRINTF("entered\n"); 720 721 /* This can't fail, it's a blocking allocation. */ 722 bio = g_alloc_bio(); 723 724 bio->bio_cmd = BIO_FLUSH; 725 bio->bio_flags |= BIO_ORDERED; 726 bio->bio_dev = dev_data->cdev; 727 bio->bio_offset = 0; 728 bio->bio_data = 0; 729 bio->bio_done = ctl_be_block_biodone; 730 bio->bio_caller1 = beio; 731 bio->bio_pblkno = 0; 732 733 /* 734 * We don't need to acquire the LUN lock here, because we are only 735 * sending one bio, and so there is no other context to synchronize 736 * with. 737 */ 738 beio->num_bios_sent = 1; 739 beio->send_complete = 1; 740 741 binuptime(&beio->ds_t0); 742 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 743 744 (*dev_data->csw->d_strategy)(bio); 745 } 746 747 static void 748 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 749 struct ctl_be_block_io *beio, 750 uint64_t off, uint64_t len, int last) 751 { 752 struct bio *bio; 753 struct ctl_be_block_devdata *dev_data; 754 uint64_t maxlen; 755 756 dev_data = &be_lun->backend.dev; 757 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 758 while (len > 0) { 759 bio = g_alloc_bio(); 760 bio->bio_cmd = BIO_DELETE; 761 bio->bio_flags |= beio->bio_flags; 762 bio->bio_dev = dev_data->cdev; 763 bio->bio_offset = off; 764 bio->bio_length = MIN(len, maxlen); 765 bio->bio_data = 0; 766 bio->bio_done = ctl_be_block_biodone; 767 bio->bio_caller1 = beio; 768 bio->bio_pblkno = off / be_lun->blocksize; 769 770 off += bio->bio_length; 771 len -= bio->bio_length; 772 773 mtx_lock(&be_lun->lock); 774 beio->num_bios_sent++; 775 if (last && len == 0) 776 beio->send_complete = 1; 777 mtx_unlock(&be_lun->lock); 778 779 (*dev_data->csw->d_strategy)(bio); 780 } 781 } 782 783 static void 784 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 785 struct ctl_be_block_io *beio) 786 { 787 union ctl_io *io; 788 struct ctl_be_block_devdata *dev_data; 789 struct ctl_ptr_len_flags ptrlen; 790 struct scsi_unmap_desc *buf, *end; 791 uint64_t len; 792 793 dev_data = &be_lun->backend.dev; 794 io = beio->io; 795 796 DPRINTF("entered\n"); 797 798 binuptime(&beio->ds_t0); 799 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 800 801 if (beio->io_offset == -1) { 802 beio->io_len = 0; 803 memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 804 sizeof(ptrlen)); 805 buf = (struct scsi_unmap_desc *)ptrlen.ptr; 806 end = buf + ptrlen.len / sizeof(*buf); 807 for (; buf < end; buf++) { 808 len = (uint64_t)scsi_4btoul(buf->length) * 809 be_lun->blocksize; 810 beio->io_len += len; 811 ctl_be_block_unmap_dev_range(be_lun, beio, 812 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 813 (end - buf < 2) ? TRUE : FALSE); 814 } 815 } else 816 ctl_be_block_unmap_dev_range(be_lun, beio, 817 beio->io_offset, beio->io_len, TRUE); 818 } 819 820 static void 821 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 822 struct ctl_be_block_io *beio) 823 { 824 int i; 825 struct bio *bio; 826 struct ctl_be_block_devdata *dev_data; 827 off_t cur_offset; 828 int max_iosize; 829 830 DPRINTF("entered\n"); 831 832 dev_data = &be_lun->backend.dev; 833 834 /* 835 * We have to limit our I/O size to the maximum supported by the 836 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 837 * set it properly, use DFLTPHYS. 838 */ 839 max_iosize = dev_data->cdev->si_iosize_max; 840 if (max_iosize < PAGE_SIZE) 841 max_iosize = DFLTPHYS; 842 843 cur_offset = beio->io_offset; 844 845 /* 846 * XXX KDM need to accurately reflect the number of I/Os outstanding 847 * to a device. 848 */ 849 binuptime(&beio->ds_t0); 850 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 851 852 for (i = 0; i < beio->num_segs; i++) { 853 size_t cur_size; 854 uint8_t *cur_ptr; 855 856 cur_size = beio->sg_segs[i].len; 857 cur_ptr = beio->sg_segs[i].addr; 858 859 while (cur_size > 0) { 860 /* This can't fail, it's a blocking allocation. */ 861 bio = g_alloc_bio(); 862 863 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 864 865 bio->bio_cmd = beio->bio_cmd; 866 bio->bio_flags |= beio->bio_flags; 867 bio->bio_dev = dev_data->cdev; 868 bio->bio_caller1 = beio; 869 bio->bio_length = min(cur_size, max_iosize); 870 bio->bio_offset = cur_offset; 871 bio->bio_data = cur_ptr; 872 bio->bio_done = ctl_be_block_biodone; 873 bio->bio_pblkno = cur_offset / be_lun->blocksize; 874 875 cur_offset += bio->bio_length; 876 cur_ptr += bio->bio_length; 877 cur_size -= bio->bio_length; 878 879 /* 880 * Make sure we set the complete bit just before we 881 * issue the last bio so we don't wind up with a 882 * race. 883 * 884 * Use the LUN mutex here instead of a combination 885 * of atomic variables for simplicity. 886 * 887 * XXX KDM we could have a per-IO lock, but that 888 * would cause additional per-IO setup and teardown 889 * overhead. Hopefully there won't be too much 890 * contention on the LUN lock. 891 */ 892 mtx_lock(&be_lun->lock); 893 894 beio->num_bios_sent++; 895 896 if ((i == beio->num_segs - 1) 897 && (cur_size == 0)) 898 beio->send_complete = 1; 899 900 mtx_unlock(&be_lun->lock); 901 902 (*dev_data->csw->d_strategy)(bio); 903 } 904 } 905 } 906 907 static void 908 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 909 { 910 union ctl_io *io; 911 912 io = beio->io; 913 ctl_free_beio(beio); 914 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 915 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 916 ctl_config_write_done(io); 917 return; 918 } 919 920 ctl_be_block_config_write(io); 921 } 922 923 static void 924 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 925 union ctl_io *io) 926 { 927 struct ctl_be_block_io *beio; 928 struct ctl_be_block_softc *softc; 929 struct ctl_lba_len_flags lbalen; 930 uint64_t len_left, lba; 931 int i, seglen; 932 uint8_t *buf, *end; 933 934 DPRINTF("entered\n"); 935 936 beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 937 softc = be_lun->softc; 938 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 939 sizeof(lbalen)); 940 941 if (lbalen.flags & ~(SWS_LBDATA | SWS_UNMAP) || 942 (lbalen.flags & SWS_UNMAP && be_lun->unmap == NULL)) { 943 ctl_free_beio(beio); 944 ctl_set_invalid_field(&io->scsiio, 945 /*sks_valid*/ 1, 946 /*command*/ 1, 947 /*field*/ 1, 948 /*bit_valid*/ 0, 949 /*bit*/ 0); 950 ctl_config_write_done(io); 951 return; 952 } 953 954 /* 955 * If the I/O came down with an ordered or head of queue tag, set 956 * the BIO_ORDERED attribute. For head of queue tags, that's 957 * pretty much the best we can do. 958 */ 959 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 960 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 961 beio->bio_flags = BIO_ORDERED; 962 963 switch (io->scsiio.tag_type) { 964 case CTL_TAG_ORDERED: 965 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 966 break; 967 case CTL_TAG_HEAD_OF_QUEUE: 968 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 969 break; 970 case CTL_TAG_UNTAGGED: 971 case CTL_TAG_SIMPLE: 972 case CTL_TAG_ACA: 973 default: 974 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 975 break; 976 } 977 978 if (lbalen.flags & SWS_UNMAP) { 979 beio->io_offset = lbalen.lba * be_lun->blocksize; 980 beio->io_len = (uint64_t)lbalen.len * be_lun->blocksize; 981 beio->bio_cmd = BIO_DELETE; 982 beio->ds_trans_type = DEVSTAT_FREE; 983 984 be_lun->unmap(be_lun, beio); 985 return; 986 } 987 988 beio->bio_cmd = BIO_WRITE; 989 beio->ds_trans_type = DEVSTAT_WRITE; 990 991 DPRINTF("WRITE SAME at LBA %jx len %u\n", 992 (uintmax_t)lbalen.lba, lbalen.len); 993 994 len_left = (uint64_t)lbalen.len * be_lun->blocksize; 995 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 996 997 /* 998 * Setup the S/G entry for this chunk. 999 */ 1000 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1001 seglen -= seglen % be_lun->blocksize; 1002 beio->sg_segs[i].len = seglen; 1003 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1004 1005 DPRINTF("segment %d addr %p len %zd\n", i, 1006 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1007 1008 beio->num_segs++; 1009 len_left -= seglen; 1010 1011 buf = beio->sg_segs[i].addr; 1012 end = buf + seglen; 1013 for (; buf < end; buf += be_lun->blocksize) { 1014 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1015 if (lbalen.flags & SWS_LBDATA) 1016 scsi_ulto4b(lbalen.lba + lba, buf); 1017 lba++; 1018 } 1019 } 1020 1021 beio->io_offset = lbalen.lba * be_lun->blocksize; 1022 beio->io_len = lba * be_lun->blocksize; 1023 1024 /* We can not do all in one run. Correct and schedule rerun. */ 1025 if (len_left > 0) { 1026 lbalen.lba += lba; 1027 lbalen.len -= lba; 1028 memcpy(io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen, 1029 sizeof(lbalen)); 1030 beio->beio_cont = ctl_be_block_cw_done_ws; 1031 } 1032 1033 be_lun->dispatch(be_lun, beio); 1034 } 1035 1036 static void 1037 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1038 union ctl_io *io) 1039 { 1040 struct ctl_be_block_io *beio; 1041 struct ctl_be_block_softc *softc; 1042 struct ctl_ptr_len_flags ptrlen; 1043 1044 DPRINTF("entered\n"); 1045 1046 beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 1047 softc = be_lun->softc; 1048 memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 1049 sizeof(ptrlen)); 1050 1051 if (ptrlen.flags != 0 || be_lun->unmap == NULL) { 1052 ctl_free_beio(beio); 1053 ctl_set_invalid_field(&io->scsiio, 1054 /*sks_valid*/ 0, 1055 /*command*/ 1, 1056 /*field*/ 0, 1057 /*bit_valid*/ 0, 1058 /*bit*/ 0); 1059 ctl_config_write_done(io); 1060 return; 1061 } 1062 1063 /* 1064 * If the I/O came down with an ordered or head of queue tag, set 1065 * the BIO_ORDERED attribute. For head of queue tags, that's 1066 * pretty much the best we can do. 1067 */ 1068 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1069 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1070 beio->bio_flags = BIO_ORDERED; 1071 1072 switch (io->scsiio.tag_type) { 1073 case CTL_TAG_ORDERED: 1074 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1075 break; 1076 case CTL_TAG_HEAD_OF_QUEUE: 1077 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1078 break; 1079 case CTL_TAG_UNTAGGED: 1080 case CTL_TAG_SIMPLE: 1081 case CTL_TAG_ACA: 1082 default: 1083 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1084 break; 1085 } 1086 1087 beio->io_len = 0; 1088 beio->io_offset = -1; 1089 1090 beio->bio_cmd = BIO_DELETE; 1091 beio->ds_trans_type = DEVSTAT_FREE; 1092 1093 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1094 (uintmax_t)lbalen.lba, lbalen.len); 1095 1096 be_lun->unmap(be_lun, beio); 1097 } 1098 1099 static void 1100 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1101 { 1102 union ctl_io *io; 1103 1104 io = beio->io; 1105 ctl_free_beio(beio); 1106 ctl_config_write_done(io); 1107 } 1108 1109 static void 1110 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1111 union ctl_io *io) 1112 { 1113 struct ctl_be_block_io *beio; 1114 struct ctl_be_block_softc *softc; 1115 1116 DPRINTF("entered\n"); 1117 1118 softc = be_lun->softc; 1119 beio = ctl_alloc_beio(softc); 1120 beio->io = io; 1121 beio->lun = be_lun; 1122 beio->beio_cont = ctl_be_block_cw_done; 1123 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio; 1124 1125 switch (io->scsiio.cdb[0]) { 1126 case SYNCHRONIZE_CACHE: 1127 case SYNCHRONIZE_CACHE_16: 1128 beio->bio_cmd = BIO_FLUSH; 1129 beio->ds_trans_type = DEVSTAT_NO_DATA; 1130 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1131 beio->io_len = 0; 1132 be_lun->lun_flush(be_lun, beio); 1133 break; 1134 case WRITE_SAME_10: 1135 case WRITE_SAME_16: 1136 ctl_be_block_cw_dispatch_ws(be_lun, io); 1137 break; 1138 case UNMAP: 1139 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1140 break; 1141 default: 1142 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1143 break; 1144 } 1145 } 1146 1147 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1148 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1149 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1150 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1151 1152 static void 1153 ctl_be_block_next(struct ctl_be_block_io *beio) 1154 { 1155 struct ctl_be_block_lun *be_lun; 1156 union ctl_io *io; 1157 1158 io = beio->io; 1159 be_lun = beio->lun; 1160 ctl_free_beio(beio); 1161 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 1162 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1163 ctl_done(io); 1164 return; 1165 } 1166 1167 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 1168 io->io_hdr.status &= ~CTL_STATUS_MASK; 1169 io->io_hdr.status |= CTL_STATUS_NONE; 1170 1171 mtx_lock(&be_lun->lock); 1172 /* 1173 * XXX KDM make sure that links is okay to use at this point. 1174 * Otherwise, we either need to add another field to ctl_io_hdr, 1175 * or deal with resource allocation here. 1176 */ 1177 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1178 mtx_unlock(&be_lun->lock); 1179 1180 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1181 } 1182 1183 static void 1184 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1185 union ctl_io *io) 1186 { 1187 struct ctl_be_block_io *beio; 1188 struct ctl_be_block_softc *softc; 1189 struct ctl_lba_len lbalen; 1190 uint64_t len_left, lbaoff; 1191 int i; 1192 1193 softc = be_lun->softc; 1194 1195 DPRINTF("entered\n"); 1196 1197 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { 1198 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1199 } else { 1200 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1201 } 1202 1203 beio = ctl_alloc_beio(softc); 1204 beio->io = io; 1205 beio->lun = be_lun; 1206 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio; 1207 1208 /* 1209 * If the I/O came down with an ordered or head of queue tag, set 1210 * the BIO_ORDERED attribute. For head of queue tags, that's 1211 * pretty much the best we can do. 1212 * 1213 * XXX KDM we don't have a great way to easily know about the FUA 1214 * bit right now (it is decoded in ctl_read_write(), but we don't 1215 * pass that knowledge to the backend), and in any case we would 1216 * need to determine how to handle it. 1217 */ 1218 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1219 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1220 beio->bio_flags = BIO_ORDERED; 1221 1222 switch (io->scsiio.tag_type) { 1223 case CTL_TAG_ORDERED: 1224 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1225 break; 1226 case CTL_TAG_HEAD_OF_QUEUE: 1227 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1228 break; 1229 case CTL_TAG_UNTAGGED: 1230 case CTL_TAG_SIMPLE: 1231 case CTL_TAG_ACA: 1232 default: 1233 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1234 break; 1235 } 1236 1237 /* 1238 * This path handles read and write only. The config write path 1239 * handles flush operations. 1240 */ 1241 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { 1242 beio->bio_cmd = BIO_READ; 1243 beio->ds_trans_type = DEVSTAT_READ; 1244 } else { 1245 beio->bio_cmd = BIO_WRITE; 1246 beio->ds_trans_type = DEVSTAT_WRITE; 1247 } 1248 1249 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 1250 sizeof(lbalen)); 1251 DPRINTF("%s at LBA %jx len %u @%ju\n", 1252 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1253 (uintmax_t)lbalen.lba, lbalen.len, lbaoff); 1254 lbaoff = io->scsiio.kern_rel_offset / be_lun->blocksize; 1255 beio->io_offset = (lbalen.lba + lbaoff) * be_lun->blocksize; 1256 beio->io_len = MIN((lbalen.len - lbaoff) * be_lun->blocksize, 1257 CTLBLK_MAX_IO_SIZE); 1258 beio->io_len -= beio->io_len % be_lun->blocksize; 1259 1260 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1261 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1262 i, CTLBLK_MAX_SEGS)); 1263 1264 /* 1265 * Setup the S/G entry for this chunk. 1266 */ 1267 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1268 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1269 1270 DPRINTF("segment %d addr %p len %zd\n", i, 1271 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1272 1273 beio->num_segs++; 1274 len_left -= beio->sg_segs[i].len; 1275 } 1276 if (io->scsiio.kern_rel_offset + beio->io_len < 1277 io->scsiio.kern_total_len) 1278 beio->beio_cont = ctl_be_block_next; 1279 io->scsiio.be_move_done = ctl_be_block_move_done; 1280 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1281 io->scsiio.kern_data_len = beio->io_len; 1282 io->scsiio.kern_data_resid = 0; 1283 io->scsiio.kern_sg_entries = beio->num_segs; 1284 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1285 1286 /* 1287 * For the read case, we need to read the data into our buffers and 1288 * then we can send it back to the user. For the write case, we 1289 * need to get the data from the user first. 1290 */ 1291 if (beio->bio_cmd == BIO_READ) { 1292 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1293 be_lun->dispatch(be_lun, beio); 1294 } else { 1295 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1296 #ifdef CTL_TIME_IO 1297 getbintime(&io->io_hdr.dma_start_bt); 1298 #endif 1299 ctl_datamove(io); 1300 } 1301 } 1302 1303 static void 1304 ctl_be_block_worker(void *context, int pending) 1305 { 1306 struct ctl_be_block_lun *be_lun; 1307 struct ctl_be_block_softc *softc; 1308 union ctl_io *io; 1309 1310 be_lun = (struct ctl_be_block_lun *)context; 1311 softc = be_lun->softc; 1312 1313 DPRINTF("entered\n"); 1314 1315 mtx_lock(&be_lun->lock); 1316 for (;;) { 1317 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1318 if (io != NULL) { 1319 struct ctl_be_block_io *beio; 1320 1321 DPRINTF("datamove queue\n"); 1322 1323 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1324 ctl_io_hdr, links); 1325 1326 mtx_unlock(&be_lun->lock); 1327 1328 beio = (struct ctl_be_block_io *) 1329 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 1330 1331 be_lun->dispatch(be_lun, beio); 1332 1333 mtx_lock(&be_lun->lock); 1334 continue; 1335 } 1336 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1337 if (io != NULL) { 1338 1339 DPRINTF("config write queue\n"); 1340 1341 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1342 ctl_io_hdr, links); 1343 1344 mtx_unlock(&be_lun->lock); 1345 1346 ctl_be_block_cw_dispatch(be_lun, io); 1347 1348 mtx_lock(&be_lun->lock); 1349 continue; 1350 } 1351 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1352 if (io != NULL) { 1353 DPRINTF("input queue\n"); 1354 1355 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1356 ctl_io_hdr, links); 1357 mtx_unlock(&be_lun->lock); 1358 1359 /* 1360 * We must drop the lock, since this routine and 1361 * its children may sleep. 1362 */ 1363 ctl_be_block_dispatch(be_lun, io); 1364 1365 mtx_lock(&be_lun->lock); 1366 continue; 1367 } 1368 1369 /* 1370 * If we get here, there is no work left in the queues, so 1371 * just break out and let the task queue go to sleep. 1372 */ 1373 break; 1374 } 1375 mtx_unlock(&be_lun->lock); 1376 } 1377 1378 /* 1379 * Entry point from CTL to the backend for I/O. We queue everything to a 1380 * work thread, so this just puts the I/O on a queue and wakes up the 1381 * thread. 1382 */ 1383 static int 1384 ctl_be_block_submit(union ctl_io *io) 1385 { 1386 struct ctl_lba_len lbalen; 1387 struct ctl_be_block_lun *be_lun; 1388 struct ctl_be_lun *ctl_be_lun; 1389 int retval; 1390 1391 DPRINTF("entered\n"); 1392 1393 retval = CTL_RETVAL_COMPLETE; 1394 1395 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1396 CTL_PRIV_BACKEND_LUN].ptr; 1397 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1398 1399 /* 1400 * Make sure we only get SCSI I/O. 1401 */ 1402 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1403 "%#x) encountered", io->io_hdr.io_type)); 1404 1405 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 1406 sizeof(lbalen)); 1407 io->scsiio.kern_total_len = lbalen.len * be_lun->blocksize; 1408 io->scsiio.kern_rel_offset = 0; 1409 1410 mtx_lock(&be_lun->lock); 1411 /* 1412 * XXX KDM make sure that links is okay to use at this point. 1413 * Otherwise, we either need to add another field to ctl_io_hdr, 1414 * or deal with resource allocation here. 1415 */ 1416 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1417 mtx_unlock(&be_lun->lock); 1418 1419 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1420 1421 return (retval); 1422 } 1423 1424 static int 1425 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1426 int flag, struct thread *td) 1427 { 1428 struct ctl_be_block_softc *softc; 1429 int error; 1430 1431 softc = &backend_block_softc; 1432 1433 error = 0; 1434 1435 switch (cmd) { 1436 case CTL_LUN_REQ: { 1437 struct ctl_lun_req *lun_req; 1438 1439 lun_req = (struct ctl_lun_req *)addr; 1440 1441 switch (lun_req->reqtype) { 1442 case CTL_LUNREQ_CREATE: 1443 error = ctl_be_block_create(softc, lun_req); 1444 break; 1445 case CTL_LUNREQ_RM: 1446 error = ctl_be_block_rm(softc, lun_req); 1447 break; 1448 case CTL_LUNREQ_MODIFY: 1449 error = ctl_be_block_modify(softc, lun_req); 1450 break; 1451 default: 1452 lun_req->status = CTL_LUN_ERROR; 1453 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1454 "%s: invalid LUN request type %d", __func__, 1455 lun_req->reqtype); 1456 break; 1457 } 1458 break; 1459 } 1460 default: 1461 error = ENOTTY; 1462 break; 1463 } 1464 1465 return (error); 1466 } 1467 1468 static int 1469 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1470 { 1471 struct ctl_be_block_filedata *file_data; 1472 struct ctl_lun_create_params *params; 1473 struct vattr vattr; 1474 int error; 1475 1476 error = 0; 1477 file_data = &be_lun->backend.file; 1478 params = &req->reqdata.create; 1479 1480 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1481 be_lun->dispatch = ctl_be_block_dispatch_file; 1482 be_lun->lun_flush = ctl_be_block_flush_file; 1483 1484 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1485 if (error != 0) { 1486 snprintf(req->error_str, sizeof(req->error_str), 1487 "error calling VOP_GETATTR() for file %s", 1488 be_lun->dev_path); 1489 return (error); 1490 } 1491 1492 /* 1493 * Verify that we have the ability to upgrade to exclusive 1494 * access on this file so we can trap errors at open instead 1495 * of reporting them during first access. 1496 */ 1497 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1498 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1499 if (be_lun->vn->v_iflag & VI_DOOMED) { 1500 error = EBADF; 1501 snprintf(req->error_str, sizeof(req->error_str), 1502 "error locking file %s", be_lun->dev_path); 1503 return (error); 1504 } 1505 } 1506 1507 1508 file_data->cred = crhold(curthread->td_ucred); 1509 if (params->lun_size_bytes != 0) 1510 be_lun->size_bytes = params->lun_size_bytes; 1511 else 1512 be_lun->size_bytes = vattr.va_size; 1513 /* 1514 * We set the multi thread flag for file operations because all 1515 * filesystems (in theory) are capable of allowing multiple readers 1516 * of a file at once. So we want to get the maximum possible 1517 * concurrency. 1518 */ 1519 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1520 1521 /* 1522 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1523 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1524 * with disklabel and UFS on FreeBSD at least. Large block sizes 1525 * may not work with other OSes as well. So just export a sector 1526 * size of 512 bytes, which should work with any OS or 1527 * application. Since our backing is a file, any block size will 1528 * work fine for the backing store. 1529 */ 1530 #if 0 1531 be_lun->blocksize= vattr.va_blocksize; 1532 #endif 1533 if (params->blocksize_bytes != 0) 1534 be_lun->blocksize = params->blocksize_bytes; 1535 else 1536 be_lun->blocksize = 512; 1537 1538 /* 1539 * Sanity check. The media size has to be at least one 1540 * sector long. 1541 */ 1542 if (be_lun->size_bytes < be_lun->blocksize) { 1543 error = EINVAL; 1544 snprintf(req->error_str, sizeof(req->error_str), 1545 "file %s size %ju < block size %u", be_lun->dev_path, 1546 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1547 } 1548 return (error); 1549 } 1550 1551 static int 1552 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1553 { 1554 struct ctl_lun_create_params *params; 1555 struct vattr vattr; 1556 struct cdev *dev; 1557 struct cdevsw *devsw; 1558 int error; 1559 off_t ps, pss, po, pos; 1560 1561 params = &req->reqdata.create; 1562 1563 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1564 be_lun->dispatch = ctl_be_block_dispatch_dev; 1565 be_lun->lun_flush = ctl_be_block_flush_dev; 1566 be_lun->unmap = ctl_be_block_unmap_dev; 1567 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1568 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1569 &be_lun->backend.dev.dev_ref); 1570 if (be_lun->backend.dev.csw == NULL) 1571 panic("Unable to retrieve device switch"); 1572 1573 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1574 if (error) { 1575 snprintf(req->error_str, sizeof(req->error_str), 1576 "%s: error getting vnode attributes for device %s", 1577 __func__, be_lun->dev_path); 1578 return (error); 1579 } 1580 1581 dev = be_lun->vn->v_rdev; 1582 devsw = dev->si_devsw; 1583 if (!devsw->d_ioctl) { 1584 snprintf(req->error_str, sizeof(req->error_str), 1585 "%s: no d_ioctl for device %s!", __func__, 1586 be_lun->dev_path); 1587 return (ENODEV); 1588 } 1589 1590 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1591 (caddr_t)&be_lun->blocksize, FREAD, 1592 curthread); 1593 if (error) { 1594 snprintf(req->error_str, sizeof(req->error_str), 1595 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1596 "on %s!", __func__, error, be_lun->dev_path); 1597 return (error); 1598 } 1599 1600 /* 1601 * If the user has asked for a blocksize that is greater than the 1602 * backing device's blocksize, we can do it only if the blocksize 1603 * the user is asking for is an even multiple of the underlying 1604 * device's blocksize. 1605 */ 1606 if ((params->blocksize_bytes != 0) 1607 && (params->blocksize_bytes > be_lun->blocksize)) { 1608 uint32_t bs_multiple, tmp_blocksize; 1609 1610 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1611 1612 tmp_blocksize = bs_multiple * be_lun->blocksize; 1613 1614 if (tmp_blocksize == params->blocksize_bytes) { 1615 be_lun->blocksize = params->blocksize_bytes; 1616 } else { 1617 snprintf(req->error_str, sizeof(req->error_str), 1618 "%s: requested blocksize %u is not an even " 1619 "multiple of backing device blocksize %u", 1620 __func__, params->blocksize_bytes, 1621 be_lun->blocksize); 1622 return (EINVAL); 1623 1624 } 1625 } else if ((params->blocksize_bytes != 0) 1626 && (params->blocksize_bytes != be_lun->blocksize)) { 1627 snprintf(req->error_str, sizeof(req->error_str), 1628 "%s: requested blocksize %u < backing device " 1629 "blocksize %u", __func__, params->blocksize_bytes, 1630 be_lun->blocksize); 1631 return (EINVAL); 1632 } 1633 1634 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1635 (caddr_t)&be_lun->size_bytes, FREAD, 1636 curthread); 1637 if (error) { 1638 snprintf(req->error_str, sizeof(req->error_str), 1639 "%s: error %d returned for DIOCGMEDIASIZE " 1640 " ioctl on %s!", __func__, error, 1641 be_lun->dev_path); 1642 return (error); 1643 } 1644 1645 if (params->lun_size_bytes != 0) { 1646 if (params->lun_size_bytes > be_lun->size_bytes) { 1647 snprintf(req->error_str, sizeof(req->error_str), 1648 "%s: requested LUN size %ju > backing device " 1649 "size %ju", __func__, 1650 (uintmax_t)params->lun_size_bytes, 1651 (uintmax_t)be_lun->size_bytes); 1652 return (EINVAL); 1653 } 1654 1655 be_lun->size_bytes = params->lun_size_bytes; 1656 } 1657 1658 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1659 (caddr_t)&ps, FREAD, curthread); 1660 if (error) 1661 ps = po = 0; 1662 else { 1663 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1664 (caddr_t)&po, FREAD, curthread); 1665 if (error) 1666 po = 0; 1667 } 1668 pss = ps / be_lun->blocksize; 1669 pos = po / be_lun->blocksize; 1670 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1671 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1672 be_lun->pblockexp = fls(pss) - 1; 1673 be_lun->pblockoff = (pss - pos) % pss; 1674 } 1675 1676 return (0); 1677 } 1678 1679 static int 1680 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1681 { 1682 DROP_GIANT(); 1683 if (be_lun->vn) { 1684 int flags = FREAD | FWRITE; 1685 1686 switch (be_lun->dev_type) { 1687 case CTL_BE_BLOCK_DEV: 1688 if (be_lun->backend.dev.csw) { 1689 dev_relthread(be_lun->backend.dev.cdev, 1690 be_lun->backend.dev.dev_ref); 1691 be_lun->backend.dev.csw = NULL; 1692 be_lun->backend.dev.cdev = NULL; 1693 } 1694 break; 1695 case CTL_BE_BLOCK_FILE: 1696 break; 1697 case CTL_BE_BLOCK_NONE: 1698 break; 1699 default: 1700 panic("Unexpected backend type."); 1701 break; 1702 } 1703 1704 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1705 be_lun->vn = NULL; 1706 1707 switch (be_lun->dev_type) { 1708 case CTL_BE_BLOCK_DEV: 1709 break; 1710 case CTL_BE_BLOCK_FILE: 1711 if (be_lun->backend.file.cred != NULL) { 1712 crfree(be_lun->backend.file.cred); 1713 be_lun->backend.file.cred = NULL; 1714 } 1715 break; 1716 case CTL_BE_BLOCK_NONE: 1717 break; 1718 default: 1719 panic("Unexpected backend type."); 1720 break; 1721 } 1722 } 1723 PICKUP_GIANT(); 1724 1725 return (0); 1726 } 1727 1728 static int 1729 ctl_be_block_open(struct ctl_be_block_softc *softc, 1730 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1731 { 1732 struct nameidata nd; 1733 int flags; 1734 int error; 1735 1736 /* 1737 * XXX KDM allow a read-only option? 1738 */ 1739 flags = FREAD | FWRITE; 1740 error = 0; 1741 1742 if (rootvnode == NULL) { 1743 snprintf(req->error_str, sizeof(req->error_str), 1744 "%s: Root filesystem is not mounted", __func__); 1745 return (1); 1746 } 1747 1748 if (!curthread->td_proc->p_fd->fd_cdir) { 1749 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1750 VREF(rootvnode); 1751 } 1752 if (!curthread->td_proc->p_fd->fd_rdir) { 1753 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1754 VREF(rootvnode); 1755 } 1756 if (!curthread->td_proc->p_fd->fd_jdir) { 1757 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1758 VREF(rootvnode); 1759 } 1760 1761 again: 1762 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1763 error = vn_open(&nd, &flags, 0, NULL); 1764 if (error) { 1765 /* 1766 * This is the only reasonable guess we can make as far as 1767 * path if the user doesn't give us a fully qualified path. 1768 * If they want to specify a file, they need to specify the 1769 * full path. 1770 */ 1771 if (be_lun->dev_path[0] != '/') { 1772 char *dev_path = "/dev/"; 1773 char *dev_name; 1774 1775 /* Try adding device path at beginning of name */ 1776 dev_name = malloc(strlen(be_lun->dev_path) 1777 + strlen(dev_path) + 1, 1778 M_CTLBLK, M_WAITOK); 1779 if (dev_name) { 1780 sprintf(dev_name, "%s%s", dev_path, 1781 be_lun->dev_path); 1782 free(be_lun->dev_path, M_CTLBLK); 1783 be_lun->dev_path = dev_name; 1784 goto again; 1785 } 1786 } 1787 snprintf(req->error_str, sizeof(req->error_str), 1788 "%s: error opening %s", __func__, be_lun->dev_path); 1789 return (error); 1790 } 1791 1792 NDFREE(&nd, NDF_ONLY_PNBUF); 1793 1794 be_lun->vn = nd.ni_vp; 1795 1796 /* We only support disks and files. */ 1797 if (vn_isdisk(be_lun->vn, &error)) { 1798 error = ctl_be_block_open_dev(be_lun, req); 1799 } else if (be_lun->vn->v_type == VREG) { 1800 error = ctl_be_block_open_file(be_lun, req); 1801 } else { 1802 error = EINVAL; 1803 snprintf(req->error_str, sizeof(req->error_str), 1804 "%s is not a disk or plain file", be_lun->dev_path); 1805 } 1806 VOP_UNLOCK(be_lun->vn, 0); 1807 1808 if (error != 0) { 1809 ctl_be_block_close(be_lun); 1810 return (error); 1811 } 1812 1813 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1814 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1815 1816 return (0); 1817 } 1818 1819 static int 1820 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1821 { 1822 struct ctl_be_block_lun *be_lun; 1823 struct ctl_lun_create_params *params; 1824 struct ctl_be_arg *file_arg; 1825 char tmpstr[32]; 1826 int retval, num_threads, unmap; 1827 int i; 1828 1829 params = &req->reqdata.create; 1830 retval = 0; 1831 1832 num_threads = cbb_num_threads; 1833 1834 file_arg = NULL; 1835 1836 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1837 1838 be_lun->softc = softc; 1839 STAILQ_INIT(&be_lun->input_queue); 1840 STAILQ_INIT(&be_lun->config_write_queue); 1841 STAILQ_INIT(&be_lun->datamove_queue); 1842 STAILQ_INIT(&be_lun->ctl_be_lun.options); 1843 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1844 mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF); 1845 1846 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1847 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1848 1849 if (be_lun->lun_zone == NULL) { 1850 snprintf(req->error_str, sizeof(req->error_str), 1851 "%s: error allocating UMA zone", __func__); 1852 goto bailout_error; 1853 } 1854 1855 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1856 be_lun->ctl_be_lun.lun_type = params->device_type; 1857 else 1858 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1859 1860 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1861 for (i = 0; i < req->num_be_args; i++) { 1862 if (strcmp(req->kern_be_args[i].kname, "file") == 0) { 1863 file_arg = &req->kern_be_args[i]; 1864 break; 1865 } 1866 } 1867 1868 if (file_arg == NULL) { 1869 snprintf(req->error_str, sizeof(req->error_str), 1870 "%s: no file argument specified", __func__); 1871 goto bailout_error; 1872 } 1873 1874 be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK, 1875 M_WAITOK | M_ZERO); 1876 1877 strlcpy(be_lun->dev_path, (char *)file_arg->kvalue, 1878 file_arg->vallen); 1879 1880 retval = ctl_be_block_open(softc, be_lun, req); 1881 if (retval != 0) { 1882 retval = 0; 1883 goto bailout_error; 1884 } 1885 1886 /* 1887 * Tell the user the size of the file/device. 1888 */ 1889 params->lun_size_bytes = be_lun->size_bytes; 1890 1891 /* 1892 * The maximum LBA is the size - 1. 1893 */ 1894 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1895 } else { 1896 /* 1897 * For processor devices, we don't have any size. 1898 */ 1899 be_lun->blocksize = 0; 1900 be_lun->pblockexp = 0; 1901 be_lun->pblockoff = 0; 1902 be_lun->size_blocks = 0; 1903 be_lun->size_bytes = 0; 1904 be_lun->ctl_be_lun.maxlba = 0; 1905 params->lun_size_bytes = 0; 1906 1907 /* 1908 * Default to just 1 thread for processor devices. 1909 */ 1910 num_threads = 1; 1911 } 1912 1913 /* 1914 * XXX This searching loop might be refactored to be combined with 1915 * the loop above, 1916 */ 1917 unmap = 0; 1918 for (i = 0; i < req->num_be_args; i++) { 1919 if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) { 1920 struct ctl_be_arg *thread_arg; 1921 char num_thread_str[16]; 1922 int tmp_num_threads; 1923 1924 1925 thread_arg = &req->kern_be_args[i]; 1926 1927 strlcpy(num_thread_str, (char *)thread_arg->kvalue, 1928 min(thread_arg->vallen, 1929 sizeof(num_thread_str))); 1930 1931 tmp_num_threads = strtol(num_thread_str, NULL, 0); 1932 1933 /* 1934 * We don't let the user specify less than one 1935 * thread, but hope he's clueful enough not to 1936 * specify 1000 threads. 1937 */ 1938 if (tmp_num_threads < 1) { 1939 snprintf(req->error_str, sizeof(req->error_str), 1940 "%s: invalid number of threads %s", 1941 __func__, num_thread_str); 1942 goto bailout_error; 1943 } 1944 1945 num_threads = tmp_num_threads; 1946 } else if (strcmp(req->kern_be_args[i].kname, "unmap") == 0 && 1947 strcmp(req->kern_be_args[i].kvalue, "on") == 0) { 1948 unmap = 1; 1949 } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 && 1950 strcmp(req->kern_be_args[i].kname, "dev") != 0) { 1951 struct ctl_be_lun_option *opt; 1952 1953 opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK); 1954 opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_CTLBLK, M_WAITOK); 1955 strcpy(opt->name, req->kern_be_args[i].kname); 1956 opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_CTLBLK, M_WAITOK); 1957 strcpy(opt->value, req->kern_be_args[i].kvalue); 1958 STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links); 1959 } 1960 } 1961 1962 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 1963 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 1964 if (unmap) 1965 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 1966 be_lun->ctl_be_lun.be_lun = be_lun; 1967 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 1968 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 1969 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 1970 /* Tell the user the blocksize we ended up using */ 1971 params->blocksize_bytes = be_lun->blocksize; 1972 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 1973 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 1974 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 1975 } else 1976 be_lun->ctl_be_lun.req_lun_id = 0; 1977 1978 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 1979 be_lun->ctl_be_lun.lun_config_status = 1980 ctl_be_block_lun_config_status; 1981 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 1982 1983 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 1984 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 1985 softc->num_luns); 1986 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 1987 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 1988 sizeof(tmpstr))); 1989 1990 /* Tell the user what we used for a serial number */ 1991 strncpy((char *)params->serial_num, tmpstr, 1992 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 1993 } else { 1994 strncpy((char *)be_lun->ctl_be_lun.serial_num, 1995 params->serial_num, 1996 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 1997 sizeof(params->serial_num))); 1998 } 1999 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2000 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2001 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 2002 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2003 sizeof(tmpstr))); 2004 2005 /* Tell the user what we used for a device ID */ 2006 strncpy((char *)params->device_id, tmpstr, 2007 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2008 } else { 2009 strncpy((char *)be_lun->ctl_be_lun.device_id, 2010 params->device_id, 2011 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2012 sizeof(params->device_id))); 2013 } 2014 2015 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2016 2017 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2018 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2019 2020 if (be_lun->io_taskqueue == NULL) { 2021 snprintf(req->error_str, sizeof(req->error_str), 2022 "%s: Unable to create taskqueue", __func__); 2023 goto bailout_error; 2024 } 2025 2026 /* 2027 * Note that we start the same number of threads by default for 2028 * both the file case and the block device case. For the file 2029 * case, we need multiple threads to allow concurrency, because the 2030 * vnode interface is designed to be a blocking interface. For the 2031 * block device case, ZFS zvols at least will block the caller's 2032 * context in many instances, and so we need multiple threads to 2033 * overcome that problem. Other block devices don't need as many 2034 * threads, but they shouldn't cause too many problems. 2035 * 2036 * If the user wants to just have a single thread for a block 2037 * device, he can specify that when the LUN is created, or change 2038 * the tunable/sysctl to alter the default number of threads. 2039 */ 2040 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2041 /*num threads*/num_threads, 2042 /*priority*/PWAIT, 2043 /*thread name*/ 2044 "%s taskq", be_lun->lunname); 2045 2046 if (retval != 0) 2047 goto bailout_error; 2048 2049 be_lun->num_threads = num_threads; 2050 2051 mtx_lock(&softc->lock); 2052 softc->num_luns++; 2053 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2054 2055 mtx_unlock(&softc->lock); 2056 2057 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2058 if (retval != 0) { 2059 mtx_lock(&softc->lock); 2060 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2061 links); 2062 softc->num_luns--; 2063 mtx_unlock(&softc->lock); 2064 snprintf(req->error_str, sizeof(req->error_str), 2065 "%s: ctl_add_lun() returned error %d, see dmesg for " 2066 "details", __func__, retval); 2067 retval = 0; 2068 goto bailout_error; 2069 } 2070 2071 mtx_lock(&softc->lock); 2072 2073 /* 2074 * Tell the config_status routine that we're waiting so it won't 2075 * clean up the LUN in the event of an error. 2076 */ 2077 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2078 2079 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2080 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2081 if (retval == EINTR) 2082 break; 2083 } 2084 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2085 2086 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2087 snprintf(req->error_str, sizeof(req->error_str), 2088 "%s: LUN configuration error, see dmesg for details", 2089 __func__); 2090 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2091 links); 2092 softc->num_luns--; 2093 mtx_unlock(&softc->lock); 2094 goto bailout_error; 2095 } else { 2096 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2097 } 2098 2099 mtx_unlock(&softc->lock); 2100 2101 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2102 be_lun->blocksize, 2103 DEVSTAT_ALL_SUPPORTED, 2104 be_lun->ctl_be_lun.lun_type 2105 | DEVSTAT_TYPE_IF_OTHER, 2106 DEVSTAT_PRIORITY_OTHER); 2107 2108 2109 req->status = CTL_LUN_OK; 2110 2111 return (retval); 2112 2113 bailout_error: 2114 req->status = CTL_LUN_ERROR; 2115 2116 ctl_be_block_close(be_lun); 2117 2118 free(be_lun->dev_path, M_CTLBLK); 2119 free(be_lun, M_CTLBLK); 2120 2121 return (retval); 2122 } 2123 2124 static int 2125 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2126 { 2127 struct ctl_lun_rm_params *params; 2128 struct ctl_be_block_lun *be_lun; 2129 int retval; 2130 2131 params = &req->reqdata.rm; 2132 2133 mtx_lock(&softc->lock); 2134 2135 be_lun = NULL; 2136 2137 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2138 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2139 break; 2140 } 2141 mtx_unlock(&softc->lock); 2142 2143 if (be_lun == NULL) { 2144 snprintf(req->error_str, sizeof(req->error_str), 2145 "%s: LUN %u is not managed by the block backend", 2146 __func__, params->lun_id); 2147 goto bailout_error; 2148 } 2149 2150 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2151 2152 if (retval != 0) { 2153 snprintf(req->error_str, sizeof(req->error_str), 2154 "%s: error %d returned from ctl_disable_lun() for " 2155 "LUN %d", __func__, retval, params->lun_id); 2156 goto bailout_error; 2157 2158 } 2159 2160 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2161 if (retval != 0) { 2162 snprintf(req->error_str, sizeof(req->error_str), 2163 "%s: error %d returned from ctl_invalidate_lun() for " 2164 "LUN %d", __func__, retval, params->lun_id); 2165 goto bailout_error; 2166 } 2167 2168 mtx_lock(&softc->lock); 2169 2170 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2171 2172 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2173 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2174 if (retval == EINTR) 2175 break; 2176 } 2177 2178 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2179 2180 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2181 snprintf(req->error_str, sizeof(req->error_str), 2182 "%s: interrupted waiting for LUN to be freed", 2183 __func__); 2184 mtx_unlock(&softc->lock); 2185 goto bailout_error; 2186 } 2187 2188 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2189 2190 softc->num_luns--; 2191 mtx_unlock(&softc->lock); 2192 2193 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2194 2195 taskqueue_free(be_lun->io_taskqueue); 2196 2197 ctl_be_block_close(be_lun); 2198 2199 if (be_lun->disk_stats != NULL) 2200 devstat_remove_entry(be_lun->disk_stats); 2201 2202 uma_zdestroy(be_lun->lun_zone); 2203 2204 free(be_lun->dev_path, M_CTLBLK); 2205 2206 free(be_lun, M_CTLBLK); 2207 2208 req->status = CTL_LUN_OK; 2209 2210 return (0); 2211 2212 bailout_error: 2213 2214 req->status = CTL_LUN_ERROR; 2215 2216 return (0); 2217 } 2218 2219 static int 2220 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2221 struct ctl_lun_req *req) 2222 { 2223 struct vattr vattr; 2224 int error; 2225 struct ctl_lun_modify_params *params; 2226 2227 params = &req->reqdata.modify; 2228 2229 if (params->lun_size_bytes != 0) { 2230 be_lun->size_bytes = params->lun_size_bytes; 2231 } else { 2232 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2233 if (error != 0) { 2234 snprintf(req->error_str, sizeof(req->error_str), 2235 "error calling VOP_GETATTR() for file %s", 2236 be_lun->dev_path); 2237 return (error); 2238 } 2239 2240 be_lun->size_bytes = vattr.va_size; 2241 } 2242 2243 return (0); 2244 } 2245 2246 static int 2247 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2248 struct ctl_lun_req *req) 2249 { 2250 struct cdev *dev; 2251 struct cdevsw *devsw; 2252 int error; 2253 struct ctl_lun_modify_params *params; 2254 uint64_t size_bytes; 2255 2256 params = &req->reqdata.modify; 2257 2258 dev = be_lun->vn->v_rdev; 2259 devsw = dev->si_devsw; 2260 if (!devsw->d_ioctl) { 2261 snprintf(req->error_str, sizeof(req->error_str), 2262 "%s: no d_ioctl for device %s!", __func__, 2263 be_lun->dev_path); 2264 return (ENODEV); 2265 } 2266 2267 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2268 (caddr_t)&size_bytes, FREAD, 2269 curthread); 2270 if (error) { 2271 snprintf(req->error_str, sizeof(req->error_str), 2272 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2273 "on %s!", __func__, error, be_lun->dev_path); 2274 return (error); 2275 } 2276 2277 if (params->lun_size_bytes != 0) { 2278 if (params->lun_size_bytes > size_bytes) { 2279 snprintf(req->error_str, sizeof(req->error_str), 2280 "%s: requested LUN size %ju > backing device " 2281 "size %ju", __func__, 2282 (uintmax_t)params->lun_size_bytes, 2283 (uintmax_t)size_bytes); 2284 return (EINVAL); 2285 } 2286 2287 be_lun->size_bytes = params->lun_size_bytes; 2288 } else { 2289 be_lun->size_bytes = size_bytes; 2290 } 2291 2292 return (0); 2293 } 2294 2295 static int 2296 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2297 { 2298 struct ctl_lun_modify_params *params; 2299 struct ctl_be_block_lun *be_lun; 2300 int error; 2301 2302 params = &req->reqdata.modify; 2303 2304 mtx_lock(&softc->lock); 2305 2306 be_lun = NULL; 2307 2308 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2309 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2310 break; 2311 } 2312 mtx_unlock(&softc->lock); 2313 2314 if (be_lun == NULL) { 2315 snprintf(req->error_str, sizeof(req->error_str), 2316 "%s: LUN %u is not managed by the block backend", 2317 __func__, params->lun_id); 2318 goto bailout_error; 2319 } 2320 2321 if (params->lun_size_bytes != 0) { 2322 if (params->lun_size_bytes < be_lun->blocksize) { 2323 snprintf(req->error_str, sizeof(req->error_str), 2324 "%s: LUN size %ju < blocksize %u", __func__, 2325 params->lun_size_bytes, be_lun->blocksize); 2326 goto bailout_error; 2327 } 2328 } 2329 2330 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2331 2332 if (be_lun->vn->v_type == VREG) 2333 error = ctl_be_block_modify_file(be_lun, req); 2334 else 2335 error = ctl_be_block_modify_dev(be_lun, req); 2336 2337 VOP_UNLOCK(be_lun->vn, 0); 2338 2339 if (error != 0) 2340 goto bailout_error; 2341 2342 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 2343 2344 /* 2345 * The maximum LBA is the size - 1. 2346 * 2347 * XXX: Note that this field is being updated without locking, 2348 * which might cause problems on 32-bit architectures. 2349 */ 2350 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2351 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2352 2353 /* Tell the user the exact size we ended up using */ 2354 params->lun_size_bytes = be_lun->size_bytes; 2355 2356 req->status = CTL_LUN_OK; 2357 2358 return (0); 2359 2360 bailout_error: 2361 req->status = CTL_LUN_ERROR; 2362 2363 return (0); 2364 } 2365 2366 static void 2367 ctl_be_block_lun_shutdown(void *be_lun) 2368 { 2369 struct ctl_be_block_lun *lun; 2370 struct ctl_be_block_softc *softc; 2371 2372 lun = (struct ctl_be_block_lun *)be_lun; 2373 2374 softc = lun->softc; 2375 2376 mtx_lock(&softc->lock); 2377 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2378 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2379 wakeup(lun); 2380 mtx_unlock(&softc->lock); 2381 2382 } 2383 2384 static void 2385 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2386 { 2387 struct ctl_be_block_lun *lun; 2388 struct ctl_be_block_softc *softc; 2389 2390 lun = (struct ctl_be_block_lun *)be_lun; 2391 softc = lun->softc; 2392 2393 if (status == CTL_LUN_CONFIG_OK) { 2394 mtx_lock(&softc->lock); 2395 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2396 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2397 wakeup(lun); 2398 mtx_unlock(&softc->lock); 2399 2400 /* 2401 * We successfully added the LUN, attempt to enable it. 2402 */ 2403 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2404 printf("%s: ctl_enable_lun() failed!\n", __func__); 2405 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2406 printf("%s: ctl_invalidate_lun() failed!\n", 2407 __func__); 2408 } 2409 } 2410 2411 return; 2412 } 2413 2414 2415 mtx_lock(&softc->lock); 2416 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2417 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2418 wakeup(lun); 2419 mtx_unlock(&softc->lock); 2420 } 2421 2422 2423 static int 2424 ctl_be_block_config_write(union ctl_io *io) 2425 { 2426 struct ctl_be_block_lun *be_lun; 2427 struct ctl_be_lun *ctl_be_lun; 2428 int retval; 2429 2430 retval = 0; 2431 2432 DPRINTF("entered\n"); 2433 2434 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2435 CTL_PRIV_BACKEND_LUN].ptr; 2436 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2437 2438 switch (io->scsiio.cdb[0]) { 2439 case SYNCHRONIZE_CACHE: 2440 case SYNCHRONIZE_CACHE_16: 2441 case WRITE_SAME_10: 2442 case WRITE_SAME_16: 2443 case UNMAP: 2444 /* 2445 * The upper level CTL code will filter out any CDBs with 2446 * the immediate bit set and return the proper error. 2447 * 2448 * We don't really need to worry about what LBA range the 2449 * user asked to be synced out. When they issue a sync 2450 * cache command, we'll sync out the whole thing. 2451 */ 2452 mtx_lock(&be_lun->lock); 2453 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2454 links); 2455 mtx_unlock(&be_lun->lock); 2456 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2457 break; 2458 case START_STOP_UNIT: { 2459 struct scsi_start_stop_unit *cdb; 2460 2461 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2462 2463 if (cdb->how & SSS_START) 2464 retval = ctl_start_lun(ctl_be_lun); 2465 else { 2466 retval = ctl_stop_lun(ctl_be_lun); 2467 /* 2468 * XXX KDM Copan-specific offline behavior. 2469 * Figure out a reasonable way to port this? 2470 */ 2471 #ifdef NEEDTOPORT 2472 if ((retval == 0) 2473 && (cdb->byte2 & SSS_ONOFFLINE)) 2474 retval = ctl_lun_offline(ctl_be_lun); 2475 #endif 2476 } 2477 2478 /* 2479 * In general, the above routines should not fail. They 2480 * just set state for the LUN. So we've got something 2481 * pretty wrong here if we can't start or stop the LUN. 2482 */ 2483 if (retval != 0) { 2484 ctl_set_internal_failure(&io->scsiio, 2485 /*sks_valid*/ 1, 2486 /*retry_count*/ 0xf051); 2487 retval = CTL_RETVAL_COMPLETE; 2488 } else { 2489 ctl_set_success(&io->scsiio); 2490 } 2491 ctl_config_write_done(io); 2492 break; 2493 } 2494 default: 2495 ctl_set_invalid_opcode(&io->scsiio); 2496 ctl_config_write_done(io); 2497 retval = CTL_RETVAL_COMPLETE; 2498 break; 2499 } 2500 2501 return (retval); 2502 2503 } 2504 2505 static int 2506 ctl_be_block_config_read(union ctl_io *io) 2507 { 2508 return (0); 2509 } 2510 2511 static int 2512 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2513 { 2514 struct ctl_be_block_lun *lun; 2515 int retval; 2516 2517 lun = (struct ctl_be_block_lun *)be_lun; 2518 retval = 0; 2519 2520 retval = sbuf_printf(sb, "<num_threads>"); 2521 2522 if (retval != 0) 2523 goto bailout; 2524 2525 retval = sbuf_printf(sb, "%d", lun->num_threads); 2526 2527 if (retval != 0) 2528 goto bailout; 2529 2530 retval = sbuf_printf(sb, "</num_threads>"); 2531 2532 /* 2533 * For processor devices, we don't have a path variable. 2534 */ 2535 if ((retval != 0) 2536 || (lun->dev_path == NULL)) 2537 goto bailout; 2538 2539 retval = sbuf_printf(sb, "<file>"); 2540 2541 if (retval != 0) 2542 goto bailout; 2543 2544 retval = ctl_sbuf_printf_esc(sb, lun->dev_path); 2545 2546 if (retval != 0) 2547 goto bailout; 2548 2549 retval = sbuf_printf(sb, "</file>\n"); 2550 2551 bailout: 2552 2553 return (retval); 2554 } 2555 2556 int 2557 ctl_be_block_init(void) 2558 { 2559 struct ctl_be_block_softc *softc; 2560 int retval; 2561 2562 softc = &backend_block_softc; 2563 retval = 0; 2564 2565 mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF); 2566 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2567 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2568 STAILQ_INIT(&softc->disk_list); 2569 STAILQ_INIT(&softc->lun_list); 2570 2571 return (retval); 2572 } 2573