/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O.  If we get an I/O larger than that, we'll reject it.
 */
#define CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
#define CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
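/*
 * A worked example of the arithmetic above, assuming the common MAXPHYS
 * of 128KB (MAXPHYS is platform-configurable, so the numbers here are
 * illustrative only): 16MB / 128KB = 128 segments, plus one extra entry
 * to cover an I/O that does not divide evenly, for 129 S/G entries.  The
 * outer parentheses matter; without them an expression such as
 * "CTLBLK_MAX_SEGS * x" would expand incorrectly.
 */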
\ 99 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 100 #else 101 #define DPRINTF(fmt, args...) do {} while(0) 102 #endif 103 104 SDT_PROVIDER_DEFINE(cbb); 105 106 typedef enum { 107 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 108 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 109 CTL_BE_BLOCK_LUN_WAITING = 0x04, 110 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 111 } ctl_be_block_lun_flags; 112 113 typedef enum { 114 CTL_BE_BLOCK_NONE, 115 CTL_BE_BLOCK_DEV, 116 CTL_BE_BLOCK_FILE 117 } ctl_be_block_type; 118 119 struct ctl_be_block_devdata { 120 struct cdev *cdev; 121 struct cdevsw *csw; 122 int dev_ref; 123 }; 124 125 struct ctl_be_block_filedata { 126 struct ucred *cred; 127 }; 128 129 union ctl_be_block_bedata { 130 struct ctl_be_block_devdata dev; 131 struct ctl_be_block_filedata file; 132 }; 133 134 struct ctl_be_block_io; 135 struct ctl_be_block_lun; 136 137 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 138 struct ctl_be_block_io *beio); 139 140 /* 141 * Backend LUN structure. There is a 1:1 mapping between a block device 142 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 143 */ 144 struct ctl_be_block_lun { 145 struct ctl_block_disk *disk; 146 char lunname[32]; 147 char *dev_path; 148 ctl_be_block_type dev_type; 149 struct vnode *vn; 150 union ctl_be_block_bedata backend; 151 cbb_dispatch_t dispatch; 152 cbb_dispatch_t lun_flush; 153 struct mtx lock; 154 uma_zone_t lun_zone; 155 uint64_t size_blocks; 156 uint64_t size_bytes; 157 uint32_t blocksize; 158 int blocksize_shift; 159 struct ctl_be_block_softc *softc; 160 struct devstat *disk_stats; 161 ctl_be_block_lun_flags flags; 162 STAILQ_ENTRY(ctl_be_block_lun) links; 163 struct ctl_be_lun ctl_be_lun; 164 struct taskqueue *io_taskqueue; 165 struct task io_task; 166 int num_threads; 167 STAILQ_HEAD(, ctl_io_hdr) input_queue; 168 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 169 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 170 }; 171 172 /* 173 * Overall softc structure for the block backend module. 174 */ 175 struct ctl_be_block_softc { 176 STAILQ_HEAD(, ctl_be_block_io) beio_free_queue; 177 struct mtx lock; 178 int prealloc_beio; 179 int num_disks; 180 STAILQ_HEAD(, ctl_block_disk) disk_list; 181 int num_luns; 182 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 183 }; 184 185 static struct ctl_be_block_softc backend_block_softc; 186 187 /* 188 * Per-I/O information. 
/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io *io;
	struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
	struct iovec xiovecs[CTLBLK_MAX_SEGS];
	int bio_cmd;
	int bio_flags;
	int num_segs;
	int num_bios_sent;
	int num_bios_done;
	int send_complete;
	int num_errors;
	struct bintime ds_t0;
	devstat_tag_type ds_tag_type;
	devstat_trans_flags ds_trans_type;
	uint64_t io_len;
	uint64_t io_offset;
	struct ctl_be_block_softc *softc;
	struct ctl_be_block_lun *lun;
	STAILQ_ENTRY(ctl_be_block_io) links;
};

static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
	   &cbb_num_threads, 0, "Number of threads per backing file");

static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
#if 0
static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
#endif
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			      int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);
static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;
	int count;

	mtx_lock(&softc->lock);

	beio = STAILQ_FIRST(&softc->beio_free_queue);
	if (beio != NULL) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
	}
	mtx_unlock(&softc->lock);

	if (beio != NULL) {
		bzero(beio, sizeof(*beio));
		beio->softc = softc;
		return (beio);
	}

	for (;;) {

		count = ctl_grow_beio(softc, /*count*/ 10);

		/*
		 * This shouldn't be possible, since ctl_grow_beio() uses a
		 * blocking malloc.
		 */
		if (count == 0)
			return (NULL);

		/*
		 * Since we have to drop the lock when we're allocating beio
		 * structures, it's possible someone else can come along and
		 * allocate the beio's we've just allocated.
		 */
		mtx_lock(&softc->lock);
		beio = STAILQ_FIRST(&softc->beio_free_queue);
		if (beio != NULL) {
			STAILQ_REMOVE(&softc->beio_free_queue, beio,
				      ctl_be_block_io, links);
		}
		mtx_unlock(&softc->lock);

		if (beio != NULL) {
			bzero(beio, sizeof(*beio));
			beio->softc = softc;
			break;
		}
	}
	return (beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_softc *softc;
	int duplicate_free;
	int i;

	softc = beio->softc;
	duplicate_free = 0;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;
	}

	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}
	mtx_lock(&softc->lock);
	STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
	mtx_unlock(&softc->lock);
}

static int
ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct ctl_be_block_io *beio;

		beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
							M_CTLBLK,
							M_WAITOK | M_ZERO);
		beio->softc = softc;
		mtx_lock(&softc->lock);
		STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
		mtx_unlock(&softc->lock);
	}

	return (i);
}

#if 0
static void
ctl_shrink_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio, *beio_tmp;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
		STAILQ_REMOVE(&softc->beio_free_queue, beio,
			      ctl_be_block_io, links);
		free(beio, M_CTLBLK);
	}
	mtx_unlock(&softc->lock);
}
#endif
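/*
 * Design note on the allocator above: the free list is a simple
 * mutex-protected STAILQ.  Because ctl_grow_beio() allocates with
 * M_WAITOK, it can only ever return the full count; the retry loop in
 * ctl_alloc_beio() exists solely because the lock is dropped between
 * growing the pool and dequeuing from it, so another thread may consume
 * the structures we just added.
 */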
static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	int io_len;

	io = beio->io;

	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
		io_len = beio->io_len;
	else
		io_len = 0;

	devstat_end_transaction(beio->lun->disk_stats,
				/*bytes*/ io_len,
				beio->ds_tag_type,
				beio->ds_trans_type,
				/*now*/ NULL,
				/*then*/&beio->ds_t0);

	ctl_free_beio(beio);
	ctl_done(io);
}

static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	beio = (struct ctl_be_block_io *)
		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 && (io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_success(&io->scsiio);
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}
static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	if (bio->bio_error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	mtx_unlock(&be_lun->lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a flush, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct mount *mountpoint;
	int error, lock_flags;

	DPRINTF("entered\n");

	io = beio->io;

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}
SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = beio->bio_flags;

	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
	}

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ)
		xuio.uio_rw = UIO_READ;
	else
		xuio.uio_rw = UIO_WRITE;

	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into the cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				  IO_SYNC : 0, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
	}
	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if (beio->bio_cmd == BIO_WRITE) {
		ctl_set_success(&io->scsiio);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
		ctl_complete_beio(beio);
	} else {
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}

static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd = BIO_FLUSH;
	bio->bio_flags |= BIO_ORDERED;
	bio->bio_dev = dev_data->cdev;
	bio->bio_offset = 0;
	bio->bio_data = 0;
	bio->bio_done = ctl_be_block_biodone;
	bio->bio_caller1 = beio;
	bio->bio_pblkno = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	(*dev_data->csw->d_strategy)(bio);
}
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;

	/*
	 * XXX KDM need to accurately reflect the number of I/Os outstanding
	 * to a device.
	 */
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_flags |= beio->bio_flags;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			/*
			 * Make sure we set the complete bit just before we
			 * issue the last bio so we don't wind up with a
			 * race.
			 *
			 * Use the LUN mutex here instead of a combination
			 * of atomic variables for simplicity.
			 *
			 * XXX KDM we could have a per-IO lock, but that
			 * would cause additional per-IO setup and teardown
			 * overhead.  Hopefully there won't be too much
			 * contention on the LUN lock.
			 */
			mtx_lock(&be_lun->lock);

			beio->num_bios_sent++;

			if ((i == beio->num_segs - 1)
			 && (cur_size == 0))
				beio->send_complete = 1;

			mtx_unlock(&be_lun->lock);

			(*dev_data->csw->d_strategy)(bio);
		}
	}
}
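/*
 * An illustrative example of the splitting above: a single 1MB S/G
 * segment sent to a device whose si_iosize_max is 128KB is issued as
 * eight 128KB bios, so a full 16MB beio can fan out into over a hundred
 * bios.  The completion side in ctl_be_block_biodone() relies on
 * num_bios_sent/num_bios_done and send_complete to notice the last one.
 */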
static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->bio_cmd = BIO_FLUSH;
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}

SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");
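/*
 * The SDT probes above (and the file_start/file_done pair earlier) can
 * be inspected from userland with DTrace.  A rough, illustrative
 * one-liner, assuming the probe components follow SDT_PROBE_DEFINE's
 * provider:module:function:name convention (exact naming can vary
 * between SDT versions):
 *
 *	dtrace -n 'cbb:kernel:read:start { @ = count(); }'
 */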
static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
		      union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len lbalen;
	uint64_t len_left, io_size_bytes;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	}

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	io_size_bytes = lbalen.len * be_lun->blocksize;

	/*
	 * XXX KDM this is temporary, until we implement chaining of beio
	 * structures and multiple datamove calls to move all the data in
	 * or out.
	 */
	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
		printf("%s: IO length %ju > max io size %u\n", __func__,
		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done(io);
		return;
	}

	beio = ctl_alloc_beio(softc);
	if (beio == NULL) {
		/*
		 * This should not happen.  ctl_alloc_beio() will call
		 * ctl_grow_beio() with a blocking malloc as needed.
		 * A malloc with M_WAITOK should not fail.
		 */
		ctl_set_busy(&io->scsiio);
		ctl_done(io);
		return;
	}

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	/*
	 * If the I/O came down with an ordered or head of queue tag, set
	 * the BIO_ORDERED attribute.  For head of queue tags, that's
	 * pretty much the best we can do.
	 *
	 * XXX KDM we don't have a great way to easily know about the FUA
	 * bit right now (it is decoded in ctl_read_write(), but we don't
	 * pass that knowledge to the backend), and in any case we would
	 * need to determine how to handle it.
	 */
	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
		beio->bio_flags = BIO_ORDERED;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	/*
	 * This path handles read and write only.  The config write path
	 * handles flush operations.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	} else {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	}

	beio->io_len = lbalen.len * be_lun->blocksize;
	beio->io_offset = lbalen.lba * be_lun->blocksize;

	DPRINTF("%s at LBA %jx len %u\n",
		(beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
		(uintmax_t)lbalen.lba, lbalen.len);

	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
	     len_left > 0; i++) {

		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(MAXPHYS, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");

			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			beio = (struct ctl_be_block_io *)
			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

			be_lun->dispatch(be_lun, beio);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {

			DPRINTF("config write queue\n");

			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			ctl_be_block_cw_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");

			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->lock);
}

/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	DPRINTF("entered\n");

	retval = CTL_RETVAL_COMPLETE;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (retval);
}

static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error;

	softc = &backend_block_softc;

	error = 0;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
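/*
 * These CTL_LUN_REQ requests normally originate from userland via the
 * CTL character device; for example, ctladm(8) issues a create request
 * with roughly "ctladm create -b block -o file=/path/to/backing/store"
 * (illustrative invocation; see the ctladm(8) manual page for the exact
 * option syntax).
 */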
static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	int error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}

static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
						&be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);

		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}
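	/*
	 * A quick worked example of the check above: asking for a
	 * 4096-byte blocksize on a 512-byte-sector device gives
	 * bs_multiple = 8 and tmp_blocksize = 4096, which matches and is
	 * accepted.  Asking for 4100 bytes gives bs_multiple = 8 and
	 * tmp_blocksize = 4096 != 4100, so the request is rejected with
	 * EINVAL.
	 */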
	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}

	return (0);
}

static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	DROP_GIANT();
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}
	}
	PICKUP_GIANT();

	return (0);
}
static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		  struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int flags;
	int error;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}

again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * the path goes if the user doesn't give us a fully
		 * qualified path.  If they want to specify a file, they
		 * need to specify the full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);
			if (dev_name) {
				sprintf(dev_name, "%s%s", dev_path,
					be_lun->dev_path);
				free(be_lun->dev_path, M_CTLBLK);
				be_lun->dev_path = dev_name;
				goto again;
			}
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or plain file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

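	/*
	 * The shift-based conversion below assumes a power-of-two block
	 * size, which is what the 512-byte default and typical
	 * DIOCGSECTORSIZE results provide in practice.  For example,
	 * fls(512) is 10, so the shift is 9, and a 1 GB backing store
	 * yields 2097152 blocks.
	 */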
	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}

static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	struct ctl_be_arg *file_arg;
	char tmpstr[32];
	int retval, num_threads;
	int i;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	file_arg = NULL;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	STAILQ_INIT(&be_lun->ctl_be_lun.options);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		for (i = 0; i < req->num_be_args; i++) {
			if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
				file_arg = &req->kern_be_args[i];
				break;
			}
		}

		if (file_arg == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}

		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
					  M_WAITOK | M_ZERO);

		strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
			file_arg->vallen);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above.
	 */
	for (i = 0; i < req->num_be_args; i++) {
		if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
			struct ctl_be_arg *thread_arg;
			char num_thread_str[16];
			int tmp_num_threads;

			thread_arg = &req->kern_be_args[i];

			strlcpy(num_thread_str, (char *)thread_arg->kvalue,
				min(thread_arg->vallen,
				sizeof(num_thread_str)));

			tmp_num_threads = strtol(num_thread_str, NULL, 0);

			/*
			 * We don't let the user specify less than one
			 * thread, but hope he's clueful enough not to
			 * specify 1000 threads.
			 */
			if (tmp_num_threads < 1) {
				snprintf(req->error_str,
					 sizeof(req->error_str),
					 "%s: invalid number of threads %s",
					 __func__, num_thread_str);
				goto bailout_error;
			}

			num_threads = tmp_num_threads;
		} else if (strcmp(req->kern_be_args[i].kname, "file") != 0 &&
		    strcmp(req->kern_be_args[i].kname, "dev") != 0) {
			struct ctl_be_lun_option *opt;

			opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK);
			opt->name = malloc(strlen(req->kern_be_args[i].kname)
					   + 1, M_CTLBLK, M_WAITOK);
			strcpy(opt->name, req->kern_be_args[i].kname);
			opt->value = malloc(strlen(req->kern_be_args[i].kvalue)
					    + 1, M_CTLBLK, M_WAITOK);
			strcpy(opt->value, req->kern_be_args[i].kvalue);
			STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt,
					   links);
		}
	}
	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	be_lun->ctl_be_lun.be_lun = be_lun;
	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = be_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_be_block_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
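	/*
	 * Concretely, the default comes from the cbb_num_threads tunable
	 * above ("kern.cam.ctl.block.num_threads", settable from
	 * loader.conf(5) or at runtime via sysctl), and the per-LUN
	 * "num_threads" option parsed in the option loop earlier
	 * overrides it for a single LUN.
	 */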
	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/num_threads,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}

	mtx_unlock(&softc->lock);

	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
					       be_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       be_lun->ctl_be_lun.lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	ctl_be_block_close(be_lun);

	free(be_lun->dev_path, M_CTLBLK);
	free(be_lun, M_CTLBLK);

	return (retval);
}

static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	int retval;

	params = &req->reqdata.rm;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
1932 "%s: error %d returned from ctl_disable_lun() for " 1933 "LUN %d", __func__, retval, params->lun_id); 1934 goto bailout_error; 1935 1936 } 1937 1938 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 1939 if (retval != 0) { 1940 snprintf(req->error_str, sizeof(req->error_str), 1941 "%s: error %d returned from ctl_invalidate_lun() for " 1942 "LUN %d", __func__, retval, params->lun_id); 1943 goto bailout_error; 1944 } 1945 1946 mtx_lock(&softc->lock); 1947 1948 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 1949 1950 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 1951 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 1952 if (retval == EINTR) 1953 break; 1954 } 1955 1956 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 1957 1958 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 1959 snprintf(req->error_str, sizeof(req->error_str), 1960 "%s: interrupted waiting for LUN to be freed", 1961 __func__); 1962 mtx_unlock(&softc->lock); 1963 goto bailout_error; 1964 } 1965 1966 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 1967 1968 softc->num_luns--; 1969 mtx_unlock(&softc->lock); 1970 1971 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 1972 1973 taskqueue_free(be_lun->io_taskqueue); 1974 1975 ctl_be_block_close(be_lun); 1976 1977 if (be_lun->disk_stats != NULL) 1978 devstat_remove_entry(be_lun->disk_stats); 1979 1980 uma_zdestroy(be_lun->lun_zone); 1981 1982 free(be_lun->dev_path, M_CTLBLK); 1983 1984 free(be_lun, M_CTLBLK); 1985 1986 req->status = CTL_LUN_OK; 1987 1988 return (0); 1989 1990 bailout_error: 1991 1992 req->status = CTL_LUN_ERROR; 1993 1994 return (0); 1995 } 1996 1997 static int 1998 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 1999 struct ctl_lun_req *req) 2000 { 2001 struct vattr vattr; 2002 int error; 2003 struct ctl_lun_modify_params *params; 2004 2005 params = &req->reqdata.modify; 2006 2007 if (params->lun_size_bytes != 0) { 2008 be_lun->size_bytes = params->lun_size_bytes; 2009 } else { 2010 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2011 if (error != 0) { 2012 snprintf(req->error_str, sizeof(req->error_str), 2013 "error calling VOP_GETATTR() for file %s", 2014 be_lun->dev_path); 2015 return (error); 2016 } 2017 2018 be_lun->size_bytes = vattr.va_size; 2019 } 2020 2021 return (0); 2022 } 2023 2024 static int 2025 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2026 struct ctl_lun_req *req) 2027 { 2028 struct cdev *dev; 2029 struct cdevsw *devsw; 2030 int error; 2031 struct ctl_lun_modify_params *params; 2032 uint64_t size_bytes; 2033 2034 params = &req->reqdata.modify; 2035 2036 dev = be_lun->vn->v_rdev; 2037 devsw = dev->si_devsw; 2038 if (!devsw->d_ioctl) { 2039 snprintf(req->error_str, sizeof(req->error_str), 2040 "%s: no d_ioctl for device %s!", __func__, 2041 be_lun->dev_path); 2042 return (ENODEV); 2043 } 2044 2045 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2046 (caddr_t)&size_bytes, FREAD, 2047 curthread); 2048 if (error) { 2049 snprintf(req->error_str, sizeof(req->error_str), 2050 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2051 "on %s!", __func__, error, be_lun->dev_path); 2052 return (error); 2053 } 2054 2055 if (params->lun_size_bytes != 0) { 2056 if (params->lun_size_bytes > size_bytes) { 2057 snprintf(req->error_str, sizeof(req->error_str), 2058 "%s: requested LUN size %ju > backing device " 2059 "size %ju", __func__, 2060 (uintmax_t)params->lun_size_bytes, 2061 (uintmax_t)size_bytes); 2062 return (EINVAL); 2063 } 2064 2065 be_lun->size_bytes = 
	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}

static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	int error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 be_lun->blocksize);
			goto bailout_error;
		}
	}

	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0)
		goto bailout_error;

	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 *	which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;

	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}

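/*
 * ctl_be_block_lun_shutdown() above and ctl_be_block_lun_config_status()
 * below are the CTL-side halves of the handshakes in ctl_be_block_create()
 * and ctl_be_block_rm(): each one flips CTL_BE_BLOCK_LUN_UNCONFIGURED (or
 * sets CTL_BE_BLOCK_LUN_CONFIG_ERR) under softc->lock and wakes any thread
 * msleep()ing on the LUN pointer.
 */
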
static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}

static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_be_block_config_read(union ctl_io *io)
{
	return (0);
}

static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "<num_threads>");

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>");
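	/*
	 * The fragment we emit here is XML, roughly (values hypothetical):
	 *
	 *	<num_threads>14</num_threads>
	 *	<file>/dev/zvol/tank/vol0</file>
	 *
	 * The path is run through ctl_sbuf_printf_esc() below so that any
	 * XML metacharacters in it are escaped for the userland consumer.
	 */
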
	/*
	 * For processor devices, we don't have a path variable.
	 */
	if ((retval != 0)
	 || (lun->dev_path == NULL))
		goto bailout;

	retval = sbuf_printf(sb, "<file>");

	if (retval != 0)
		goto bailout;

	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</file>\n");

bailout:
	return (retval);
}

int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->beio_free_queue);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);
	ctl_grow_beio(softc, 200);

	return (retval);
}
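
/*
 * Example usage from userland (a sketch; exact ctladm syntax may vary
 * between versions):
 *
 *	ctladm create -b block -o file=/dev/md0
 *	ctladm create -b block -o file=/tank/lunfile -o num_threads=8
 *	ctladm modify -b block -l 0 -s 10485760
 *	ctladm remove -b block -l 0
 *
 * "file" may name either a regular file or a block device; unrecognized
 * "-o" options end up on the ctl_be_lun options list built in
 * ctl_be_block_create().
 */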