/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O.  If we get an I/O larger than that, we'll reject it.
 */
#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
#define	CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
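/*
 * Worked example (assuming the common MAXPHYS of 128KB): a 16MB I/O
 * splits into 16MB / 128KB = 128 full segments, and the "+ 1" leaves
 * room for a trailing partial segment when the I/O size is not an
 * exact multiple of MAXPHYS, for 129 S/G entries in all.
 */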
#ifdef CTLBLK_DEBUG
#define	DPRINTF(fmt, args...) \
	printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define	DPRINTF(fmt, args...)	do {} while (0)
#endif

SDT_PROVIDER_DEFINE(cbb);

typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);

/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	struct mtx lock;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	STAILQ_HEAD(, ctl_be_block_io) beio_free_queue;
	struct mtx lock;
	int prealloc_beio;
	int num_disks;
	STAILQ_HEAD(, ctl_block_disk) disk_list;
	int num_luns;
	STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};

static struct ctl_be_block_softc backend_block_softc;

/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io *io;
	struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
	struct iovec xiovecs[CTLBLK_MAX_SEGS];
	int bio_cmd;
	int bio_flags;
	int num_segs;
	int num_bios_sent;
	int num_bios_done;
	int send_complete;
	int num_errors;
	struct bintime ds_t0;
	devstat_tag_type ds_tag_type;
	devstat_trans_flags ds_trans_type;
	uint64_t io_len;
	uint64_t io_offset;
	struct ctl_be_block_softc *softc;
	struct ctl_be_block_lun *lun;
	STAILQ_ENTRY(ctl_be_block_io) links;
};

static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
	   &cbb_num_threads, 0, "Number of threads per backing file");
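/*
 * Example (illustrative): the default thread count can be seeded at
 * boot via loader.conf(5) with a line like
 *
 *	kern.cam.ctl.block.num_threads="8"
 *
 * or adjusted at runtime with
 *
 *	sysctl kern.cam.ctl.block.num_threads=8
 *
 * Since the value is sampled when a LUN is created, a runtime change
 * only affects LUNs created afterwards.
 */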
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
#if 0
static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
#endif
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			      int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);

static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;
	int count;

	mtx_lock(&softc->lock);

	beio = STAILQ_FIRST(&softc->beio_free_queue);
	if (beio != NULL) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
	}
	mtx_unlock(&softc->lock);

	if (beio != NULL) {
		bzero(beio, sizeof(*beio));
		beio->softc = softc;
		return (beio);
	}

	for (;;) {

		count = ctl_grow_beio(softc, /*count*/ 10);

		/*
		 * This shouldn't be possible, since ctl_grow_beio() uses a
		 * blocking malloc.
		 */
		if (count == 0)
			return (NULL);

		/*
		 * Since we have to drop the lock when we're allocating beio
		 * structures, it's possible someone else can come along and
		 * grab the beio structures we've just allocated.
		 */
		mtx_lock(&softc->lock);
		beio = STAILQ_FIRST(&softc->beio_free_queue);
		if (beio != NULL) {
			STAILQ_REMOVE(&softc->beio_free_queue, beio,
				      ctl_be_block_io, links);
		}
		mtx_unlock(&softc->lock);

		if (beio != NULL) {
			bzero(beio, sizeof(*beio));
			beio->softc = softc;
			break;
		}
	}
	return (beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_softc *softc;
	int duplicate_free;
	int i;

	softc = beio->softc;
	duplicate_free = 0;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;
	}

	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}
	mtx_lock(&softc->lock);
	STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
	mtx_unlock(&softc->lock);
}

static int
ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct ctl_be_block_io *beio;

		beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
							M_CTLBLK,
							M_WAITOK | M_ZERO);
		beio->softc = softc;
		mtx_lock(&softc->lock);
		STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
		mtx_unlock(&softc->lock);
	}

	return (i);
}

#if 0
static void
ctl_shrink_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio, *beio_tmp;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
		free(beio, M_CTLBLK);
	}
	mtx_unlock(&softc->lock);
}
#endif
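/*
 * Illustrative sketch (not part of the driver): an init path could
 * prewarm the free list so that early I/O never takes the grow path,
 * along these lines:
 *
 *	if (ctl_grow_beio(&backend_block_softc, 200) < 200)
 *		printf("ctlblk: beio preallocation fell short\n");
 *
 * With the current M_WAITOK implementation ctl_grow_beio() always
 * returns `count', so the check above would be purely defensive.
 */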
static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	int io_len;

	io = beio->io;

	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
		io_len = beio->io_len;
	else
		io_len = 0;

	devstat_end_transaction(beio->lun->disk_stats,
				/*bytes*/ io_len,
				beio->ds_tag_type,
				beio->ds_trans_type,
				/*now*/ NULL,
				/*then*/&beio->ds_t0);

	ctl_free_beio(beio);
	ctl_done(io);
}

static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	beio = (struct ctl_be_block_io *)
		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 && (io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_success(&io->scsiio);
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}

static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	if (bio->bio_error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	mtx_unlock(&be_lun->lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a flush, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct mount *mountpoint;
	int error, lock_flags;

	DPRINTF("entered\n");

	io = beio->io;

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}
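/*
 * The file-backed I/O paths below are instrumented with static DTrace
 * probes under the "cbb" provider defined above.  Assuming the stock
 * SDT(9) naming, elapsed file I/O time could be observed from userland
 * with a one-liner along these lines (illustrative only):
 *
 *	dtrace -n 'cbb::read:file_start { self->ts = timestamp; }
 *	    cbb::read:file_done /self->ts/
 *	    { @["ns"] = quantize(timestamp - self->ts); self->ts = 0; }'
 */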
SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = beio->bio_flags;

	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
	}

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ)
		xuio.uio_rw = UIO_READ;
	else
		xuio.uio_rw = UIO_WRITE;

	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into the cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				  IO_SYNC : 0, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
	}

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if (beio->bio_cmd == BIO_WRITE) {
		ctl_set_success(&io->scsiio);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
		ctl_complete_beio(beio);
	} else {
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd = BIO_FLUSH;
	bio->bio_flags |= BIO_ORDERED;
	bio->bio_dev = dev_data->cdev;
	bio->bio_offset = 0;
	bio->bio_data = 0;
	bio->bio_done = ctl_be_block_biodone;
	bio->bio_caller1 = beio;
	bio->bio_pblkno = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	(*dev_data->csw->d_strategy)(bio);
}

static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;

	/*
	 * XXX KDM need to accurately reflect the number of I/Os outstanding
	 * to a device.
	 */
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_flags |= beio->bio_flags;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			/*
			 * Make sure we set the complete bit just before we
			 * issue the last bio so we don't wind up with a
			 * race.
			 *
			 * Use the LUN mutex here instead of a combination
			 * of atomic variables for simplicity.
			 *
			 * XXX KDM we could have a per-IO lock, but that
			 * would cause additional per-IO setup and teardown
			 * overhead.  Hopefully there won't be too much
			 * contention on the LUN lock.
			 */
			mtx_lock(&be_lun->lock);

			beio->num_bios_sent++;

			if ((i == beio->num_segs - 1)
			 && (cur_size == 0))
				beio->send_complete = 1;

			mtx_unlock(&be_lun->lock);

			(*dev_data->csw->d_strategy)(bio);
		}
	}
}

static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}

SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");

static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
		      union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len lbalen;
	uint64_t len_left, io_size_bytes;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	}

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	io_size_bytes = lbalen.len * be_lun->blocksize;

	/*
	 * XXX KDM this is temporary, until we implement chaining of beio
	 * structures and multiple datamove calls to move all the data in
	 * or out.
	 */
	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
		printf("%s: IO length %ju > max io size %u\n", __func__,
		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done(io);
		return;
	}

	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	/*
	 * If the I/O came down with an ordered or head of queue tag, set
	 * the BIO_ORDERED attribute.  For head of queue tags, that's
	 * pretty much the best we can do.
	 *
	 * XXX KDM we don't have a great way to easily know about the FUA
	 * bit right now (it is decoded in ctl_read_write(), but we don't
	 * pass that knowledge to the backend), and in any case we would
	 * need to determine how to handle it.
	 */
	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
		beio->bio_flags = BIO_ORDERED;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	/*
	 * This path handles read and write only.  The config write path
	 * handles flush operations.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	} else {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	}

	beio->io_len = lbalen.len * be_lun->blocksize;
	beio->io_offset = lbalen.lba * be_lun->blocksize;

	DPRINTF("%s at LBA %jx len %u\n",
		(beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
		(uintmax_t)lbalen.lba, lbalen.len);

	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
	     len_left > 0; i++) {

		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(MAXPHYS, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");

			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			beio = (struct ctl_be_block_io *)
				io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

			be_lun->dispatch(be_lun, beio);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {

			DPRINTF("config write queue\n");

			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			ctl_be_block_cw_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");

			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->lock);
}

/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	DPRINTF("entered\n");

	retval = CTL_RETVAL_COMPLETE;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (retval);
}

static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
		   int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error;

	softc = &backend_block_softc;

	error = 0;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	int error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}

static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
					       &be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
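	/*
	 * Worked example: on a 512-byte-sector backing device, a requested
	 * blocksize of 4096 gives bs_multiple = 8 and tmp_blocksize = 4096,
	 * so the request is accepted.  A requested blocksize of 1000 gives
	 * bs_multiple = 1 and tmp_blocksize = 512 != 1000, so it is
	 * rejected.
	 */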
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);

		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE "
			 "ioctl on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}

	return (0);
}

static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	DROP_GIANT();
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			break;
		case CTL_BE_BLOCK_NONE:
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
		default:
			panic("Unexpected backend type.");
			break;
		}
	}
	PICKUP_GIANT();

	return (0);
}

static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		  struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int flags;
	int error;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}

again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * the path goes if the user doesn't give us a fully
		 * qualified path.  If they want to specify a file, they
		 * need to specify the full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);
			if (dev_name) {
				sprintf(dev_name, "%s%s", dev_path,
					be_lun->dev_path);
				free(be_lun->dev_path, M_CTLBLK);
				be_lun->dev_path = dev_name;
				goto again;
			}
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}

static int
ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
{
	return (0);
}

static void
ctl_be_block_mem_dtor(void *mem, int size, void *arg)
{
	bzero(mem, size);
}
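/*
 * Note that the zone destructor zeroes buffers as they are returned to
 * the zone, presumably so that data from one initiator's I/O can never
 * leak into another's through a recycled buffer; the constructor can
 * therefore hand memory out without clearing it on the allocation path.
 */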
static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	struct ctl_be_arg *file_arg;
	char tmpstr[32];
	int retval, num_threads;
	int i;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	file_arg = NULL;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
	    ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
	    /*align*/ 0, /*flags*/ 0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		for (i = 0; i < req->num_be_args; i++) {
			if (strcmp(req->kern_be_args[i].name, "file") == 0) {
				file_arg = &req->kern_be_args[i];
				break;
			}
		}

		if (file_arg == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}

		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
					  M_WAITOK | M_ZERO);

		strlcpy(be_lun->dev_path, (char *)file_arg->value,
			file_arg->vallen);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above.
	 */
	for (i = 0; i < req->num_be_args; i++) {
		if (strcmp(req->kern_be_args[i].name, "num_threads") == 0) {
			struct ctl_be_arg *thread_arg;
			char num_thread_str[16];
			int tmp_num_threads;

			thread_arg = &req->kern_be_args[i];

			strlcpy(num_thread_str, (char *)thread_arg->value,
				min(thread_arg->vallen,
				    sizeof(num_thread_str)));

			tmp_num_threads = strtol(num_thread_str, NULL, 0);

			/*
			 * We don't let the user specify less than one
			 * thread, but hope he's clueful enough not to
			 * specify 1000 threads.
			 */
			if (tmp_num_threads < 1) {
				snprintf(req->error_str,
					 sizeof(req->error_str),
					 "%s: invalid number of threads %s",
					 __func__, num_thread_str);
				goto bailout_error;
			}

			num_threads = tmp_num_threads;
		}
	}

	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	be_lun->ctl_be_lun.be_lun = be_lun;
	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = be_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_be_block_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/num_threads,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}

	mtx_unlock(&softc->lock);

	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
					       be_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       be_lun->ctl_be_lun.lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	ctl_be_block_close(be_lun);

	free(be_lun->dev_path, M_CTLBLK);
	free(be_lun, M_CTLBLK);

	return (retval);
}

static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	int retval;

	params = &req->reqdata.rm;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}

	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: interrupted waiting for LUN to be freed",
			 __func__);
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);

	softc->num_luns--;
	mtx_unlock(&softc->lock);

	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);

	taskqueue_free(be_lun->io_taskqueue);

	ctl_be_block_close(be_lun);

	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);

	uma_zdestroy(be_lun->lun_zone);

	free(be_lun->dev_path, M_CTLBLK);

	free(be_lun, M_CTLBLK);

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:

	req->status = CTL_LUN_ERROR;

	return (0);
}
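/*
 * Removal relies on a small handshake: once CTL finishes tearing the
 * LUN down after ctl_invalidate_lun(), it is expected to call our
 * lun_shutdown callback (ctl_be_block_lun_shutdown(), below), which
 * sets CTL_BE_BLOCK_LUN_UNCONFIGURED and wakeup()s anyone sleeping on
 * the LUN -- exactly what the msleep() loop above is waiting for.  The
 * WAITING flag tells the shutdown path that such a waiter exists.
 */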
	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}

	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: interrupted waiting for LUN to be freed",
			 __func__);
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);

	softc->num_luns--;
	mtx_unlock(&softc->lock);

	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);

	taskqueue_free(be_lun->io_taskqueue);

	ctl_be_block_close(be_lun);

	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);

	uma_zdestroy(be_lun->lun_zone);

	free(be_lun->dev_path, M_CTLBLK);

	free(be_lun, M_CTLBLK);

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static int
ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
			 struct ctl_lun_req *req)
{
	struct vattr vattr;
	int error;
	struct ctl_lun_modify_params *params;

	params = &req->reqdata.modify;

	if (params->lun_size_bytes != 0) {
		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
		if (error != 0) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "error calling VOP_GETATTR() for file %s",
				 be_lun->dev_path);
			return (error);
		}

		be_lun->size_bytes = vattr.va_size;
	}

	return (0);
}

static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}
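/*
 * Handle a LUN resize request.  Depending on whether the backing store is
 * a plain file or a device node, the new size comes from the request, from
 * VOP_GETATTR(), or from the DIOCGMEDIASIZE ioctl above.  From userland
 * this would typically be driven by something like
 * "ctladm modify -l <lun_id> -s <size>" (exact flags may vary by version).
 */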
static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	int error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 be_lun->blocksize);
			goto bailout_error;
		}
	}

	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0)
		goto bailout_error;

	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 * which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}
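/*
 * Completion callback from CTL, invoked once LUN registration has either
 * succeeded or failed.  In both cases we clear the UNCONFIGURED flag
 * (adding CONFIG_ERR on failure) and wake the thread sleeping in
 * ctl_be_block_create(), which then reports the result to userland.
 */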
static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}

static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_be_block_config_read(union ctl_io *io)
{
	/* No configuration reads are handled by the block backend yet. */
	return (0);
}
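/*
 * Dump backend-specific state for this LUN as XML-style elements; this is
 * what shows up in verbose LUN listings (e.g. "ctladm devlist -v" on
 * systems that support it).  Illustrative output only:
 *
 *	<num_threads>14</num_threads><file>/dev/zvol/tank/vol0</file>
 */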
static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "<num_threads>");

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>");

	/*
	 * For processor devices, we don't have a path variable.
	 */
	if ((retval != 0)
	 || (lun->dev_path == NULL))
		goto bailout;

	retval = sbuf_printf(sb, "<file>");

	if (retval != 0)
		goto bailout;

	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</file>\n");

bailout:
	return (retval);
}

int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->beio_free_queue);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);
	ctl_grow_beio(softc, 200);

	return (retval);
}