/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O. If we get an I/O larger than that, we'll reject it.
 */
#define CTLBLK_MAX_IO_SIZE    (16 * 1024 * 1024)
#define CTLBLK_MAX_SEGS       ((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
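/*
 * For example, with MAXPHYS at its common value of 128KB, this works
 * out to (16MB / 128KB) + 1 = 129 S/G segments per I/O.
 */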

#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
    printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

SDT_PROVIDER_DEFINE(cbb);

typedef enum {
    CTL_BE_BLOCK_LUN_UNCONFIGURED  = 0x01,
    CTL_BE_BLOCK_LUN_CONFIG_ERR    = 0x02,
    CTL_BE_BLOCK_LUN_WAITING       = 0x04,
    CTL_BE_BLOCK_LUN_MULTI_THREAD  = 0x08
} ctl_be_block_lun_flags;

typedef enum {
    CTL_BE_BLOCK_NONE,
    CTL_BE_BLOCK_DEV,
    CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
    struct cdev *cdev;
    struct cdevsw *csw;
    int dev_ref;
};

struct ctl_be_block_filedata {
    struct ucred *cred;
};

union ctl_be_block_bedata {
    struct ctl_be_block_devdata dev;
    struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
                               struct ctl_be_block_io *beio);

/*
 * Backend LUN structure. There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
    struct ctl_block_disk *disk;
    char lunname[32];
    char *dev_path;
    ctl_be_block_type dev_type;
    struct vnode *vn;
    union ctl_be_block_bedata backend;
    cbb_dispatch_t dispatch;
    cbb_dispatch_t lun_flush;
    struct mtx lock;
    uma_zone_t lun_zone;
    uint64_t size_blocks;
    uint64_t size_bytes;
    uint32_t blocksize;
    int blocksize_shift;
    struct ctl_be_block_softc *softc;
    struct devstat *disk_stats;
    ctl_be_block_lun_flags flags;
    STAILQ_ENTRY(ctl_be_block_lun) links;
    struct ctl_be_lun ctl_be_lun;
    struct taskqueue *io_taskqueue;
    struct task io_task;
    int num_threads;
    STAILQ_HEAD(, ctl_io_hdr) input_queue;
    STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
    STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
    STAILQ_HEAD(, ctl_be_block_io) beio_free_queue;
    struct mtx lock;
    int prealloc_beio;
    int num_disks;
    STAILQ_HEAD(, ctl_block_disk) disk_list;
    int num_luns;
    STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};

static struct ctl_be_block_softc backend_block_softc;

/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
    union ctl_io *io;
    struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
    struct iovec xiovecs[CTLBLK_MAX_SEGS];
    int bio_cmd;
    int bio_flags;
    int num_segs;
    int num_bios_sent;
    int num_bios_done;
    int send_complete;
    int num_errors;
    struct bintime ds_t0;
    devstat_tag_type ds_tag_type;
    devstat_trans_flags ds_trans_type;
    uint64_t io_len;
    uint64_t io_offset;
    struct ctl_be_block_softc *softc;
    struct ctl_be_block_lun *lun;
    STAILQ_ENTRY(ctl_be_block_io) links;
};

static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
    &cbb_num_threads, 0, "Number of threads per backing file");
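/*
 * The default can be overridden at boot time with a loader.conf line
 * such as kern.cam.ctl.block.num_threads=8, or changed at runtime via
 * sysctl(8); runtime changes only affect LUNs created afterwards,
 * since the value is sampled at LUN creation time.
 */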

static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
#if 0
static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
#endif
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
                                    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
                                       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
                                   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
                                      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
                                     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
                                  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                              int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
                                  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
                                 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
                             struct ctl_be_block_lun *be_lun,
                             struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
                               struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
                           struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
                                    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
                                   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
                               struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
                                           ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);

static struct ctl_backend_driver ctl_be_block_driver =
{
    .name = "block",
    .flags = CTL_BE_FLAG_HAS_CONFIG,
    .init = ctl_be_block_init,
    .data_submit = ctl_be_block_submit,
    .data_move_done = ctl_be_block_move_done,
    .config_read = ctl_be_block_config_read,
    .config_write = ctl_be_block_config_write,
    .ioctl = ctl_be_block_ioctl,
    .lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
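/*
 * CTL_BACKEND_DECLARE() registers this driver with the CTL mid-layer at
 * module load time; the mid-layer then invokes the .init method above.
 */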

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
    struct ctl_be_block_io *beio;
    int count;

    mtx_lock(&softc->lock);

    beio = STAILQ_FIRST(&softc->beio_free_queue);
    if (beio != NULL) {
        STAILQ_REMOVE(&softc->beio_free_queue, beio,
                      ctl_be_block_io, links);
    }
    mtx_unlock(&softc->lock);

    if (beio != NULL) {
        bzero(beio, sizeof(*beio));
        beio->softc = softc;
        return (beio);
    }

    for (;;) {

        count = ctl_grow_beio(softc, /*count*/ 10);

        /*
         * This shouldn't be possible, since ctl_grow_beio() uses a
         * blocking malloc.
         */
        if (count == 0)
            return (NULL);

        /*
         * Since we have to drop the lock when we're allocating beio
         * structures, it's possible someone else can come along and
         * allocate the beio's we've just allocated.
         */
        mtx_lock(&softc->lock);
        beio = STAILQ_FIRST(&softc->beio_free_queue);
        if (beio != NULL) {
            STAILQ_REMOVE(&softc->beio_free_queue, beio,
                          ctl_be_block_io, links);
        }
        mtx_unlock(&softc->lock);

        if (beio != NULL) {
            bzero(beio, sizeof(*beio));
            beio->softc = softc;
            break;
        }
    }
    return (beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
    struct ctl_be_block_softc *softc;
    int duplicate_free;
    int i;

    softc = beio->softc;
    duplicate_free = 0;

    for (i = 0; i < beio->num_segs; i++) {
        if (beio->sg_segs[i].addr == NULL)
            duplicate_free++;

        uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
        beio->sg_segs[i].addr = NULL;
    }

    if (duplicate_free > 0) {
        printf("%s: %d duplicate frees out of %d segments\n", __func__,
               duplicate_free, beio->num_segs);
    }
    mtx_lock(&softc->lock);
    STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
    mtx_unlock(&softc->lock);
}

static int
ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        struct ctl_be_block_io *beio;

        beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
                                                M_CTLBLK,
                                                M_WAITOK | M_ZERO);
        beio->softc = softc;
        mtx_lock(&softc->lock);
        STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
        mtx_unlock(&softc->lock);
    }

    return (i);
}

#if 0
static void
ctl_shrink_beio(struct ctl_be_block_softc *softc)
{
    struct ctl_be_block_io *beio, *beio_tmp;

    mtx_lock(&softc->lock);
    STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
        STAILQ_REMOVE(&softc->beio_free_queue, beio,
                      ctl_be_block_io, links);
        free(beio, M_CTLBLK);
    }
    mtx_unlock(&softc->lock);
}
#endif

static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
    union ctl_io *io;
    int io_len;

    io = beio->io;

    if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
        io_len = beio->io_len;
    else
        io_len = 0;

    devstat_end_transaction(beio->lun->disk_stats,
                            /*bytes*/ io_len,
                            beio->ds_tag_type,
                            beio->ds_trans_type,
                            /*now*/ NULL,
                            /*then*/ &beio->ds_t0);

    ctl_free_beio(beio);
    ctl_done(io);
}

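/*
 * Completion callback for ctl_datamove(). For reads, the data has just
 * been shipped to the initiator; for writes, the data has just arrived
 * and the backend I/O still has to be dispatched from the worker thread.
 */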
static int
ctl_be_block_move_done(union ctl_io *io)
{
    struct ctl_be_block_io *beio;
    struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
    struct bintime cur_bt;
#endif

    beio = (struct ctl_be_block_io *)
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

    be_lun = beio->lun;

    DPRINTF("entered\n");

#ifdef CTL_TIME_IO
    getbintime(&cur_bt);
    bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
    bintime_add(&io->io_hdr.dma_bt, &cur_bt);
    io->io_hdr.num_dmas++;
#endif

    /*
     * We set status at this point for read commands, and write
     * commands with errors.
     */
    if ((beio->bio_cmd == BIO_READ)
     && (io->io_hdr.port_status == 0)
     && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
     && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
        ctl_set_success(&io->scsiio);
    else if ((io->io_hdr.port_status != 0)
          && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
          && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
        /*
         * For hardware error sense keys, the sense key
         * specific value is defined to be a retry count,
         * but we use it to pass back an internal FETD
         * error code. XXX KDM Hopefully the FETD is only
         * using 16 bits for an error code, since that's
         * all the space we have in the sks field.
         */
        ctl_set_internal_failure(&io->scsiio,
                                 /*sks_valid*/ 1,
                                 /*retry_count*/
                                 io->io_hdr.port_status);
    }

    /*
     * If this is a read, or a write with errors, it is done.
     */
    if ((beio->bio_cmd == BIO_READ)
     || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
     || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
        ctl_complete_beio(beio);
        return (0);
    }

    /*
     * At this point, we have a write and the DMA completed
     * successfully. We now have to queue it to the task queue to
     * execute the backend I/O. That is because we do blocking
     * memory allocations, and in the file backing case, blocking I/O.
     * This move done routine is generally called in the SIM's
     * interrupt context, and therefore we cannot block.
     */
    mtx_lock(&be_lun->lock);
    /*
     * XXX KDM make sure that links is okay to use at this point.
     * Otherwise, we either need to add another field to ctl_io_hdr,
     * or deal with resource allocation here.
     */
    STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
    mtx_unlock(&be_lun->lock);

    taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

    return (0);
}

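/*
 * Per-bio completion handler, called via biodone(9) for every bio issued
 * by the dev dispatch and flush paths; the beio as a whole is completed
 * only when the last outstanding bio finishes.
 */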
static void
ctl_be_block_biodone(struct bio *bio)
{
    struct ctl_be_block_io *beio;
    struct ctl_be_block_lun *be_lun;
    union ctl_io *io;
    int error;

    beio = bio->bio_caller1;
    be_lun = beio->lun;
    io = beio->io;

    DPRINTF("entered\n");

    error = bio->bio_error;
    mtx_lock(&be_lun->lock);
    if (error != 0)
        beio->num_errors++;

    beio->num_bios_done++;

    /*
     * XXX KDM will this cause WITNESS to complain? Holding a lock
     * during the free might cause it to complain.
     */
    g_destroy_bio(bio);

    /*
     * If the send complete bit isn't set, or we aren't the last I/O to
     * complete, then we're done.
     */
    if ((beio->send_complete == 0)
     || (beio->num_bios_done < beio->num_bios_sent)) {
        mtx_unlock(&be_lun->lock);
        return;
    }

    /*
     * At this point, we've verified that we are the last I/O to
     * complete, so it's safe to drop the lock.
     */
    mtx_unlock(&be_lun->lock);

    /*
     * If there are any errors from the backing device, we fail the
     * entire I/O with a medium error.
     */
    if (beio->num_errors > 0) {
        if (error == EOPNOTSUPP) {
            ctl_set_invalid_opcode(&io->scsiio);
        } else if (beio->bio_cmd == BIO_FLUSH) {
            /* XXX KDM is there a better error here? */
            ctl_set_internal_failure(&io->scsiio,
                                     /*sks_valid*/ 1,
                                     /*retry_count*/ 0xbad2);
        } else
            ctl_set_medium_error(&io->scsiio);
        ctl_complete_beio(beio);
        return;
    }

    /*
     * If this is a write or a flush, we're all done.
     * If this is a read, we can now send the data to the user.
     */
    if ((beio->bio_cmd == BIO_WRITE)
     || (beio->bio_cmd == BIO_FLUSH)) {
        ctl_set_success(&io->scsiio);
        ctl_complete_beio(beio);
    } else {
        io->scsiio.be_move_done = ctl_be_block_move_done;
        io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
        io->scsiio.kern_data_len = beio->io_len;
        io->scsiio.kern_total_len = beio->io_len;
        io->scsiio.kern_rel_offset = 0;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_sg_entries = beio->num_segs;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
    }
}

static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
                        struct ctl_be_block_io *beio)
{
    union ctl_io *io;
    struct mount *mountpoint;
    int error, lock_flags;

    DPRINTF("entered\n");

    io = beio->io;

    (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

    if (MNT_SHARED_WRITES(mountpoint)
     || ((mountpoint == NULL)
      && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
        lock_flags = LK_SHARED;
    else
        lock_flags = LK_EXCLUSIVE;

    vn_lock(be_lun->vn, lock_flags | LK_RETRY);

    binuptime(&beio->ds_t0);
    devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

    error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
    VOP_UNLOCK(be_lun->vn, 0);

    vn_finished_write(mountpoint);

    if (error == 0)
        ctl_set_success(&io->scsiio);
    else {
        /* XXX KDM is there a better error here? */
        ctl_set_internal_failure(&io->scsiio,
                                 /*sks_valid*/ 1,
                                 /*retry_count*/ 0xbad1);
    }

    ctl_complete_beio(beio);
}

SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");
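/*
 * These static probes can be inspected with dtrace(1); for example,
 * something along the lines of "dtrace -n 'cbb:::file_done'" (the exact
 * probe naming depends on the SDT glue) fires when a file-backed read
 * or write completes.
 */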

static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
                           struct ctl_be_block_io *beio)
{
    struct ctl_be_block_filedata *file_data;
    union ctl_io *io;
    struct uio xuio;
    struct iovec *xiovec;
    int flags;
    int error, i;

    DPRINTF("entered\n");

    file_data = &be_lun->backend.file;
    io = beio->io;
    flags = beio->bio_flags;

    if (beio->bio_cmd == BIO_READ) {
        SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
    } else {
        SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
    }

    bzero(&xuio, sizeof(xuio));
    if (beio->bio_cmd == BIO_READ)
        xuio.uio_rw = UIO_READ;
    else
        xuio.uio_rw = UIO_WRITE;

    xuio.uio_offset = beio->io_offset;
    xuio.uio_resid = beio->io_len;
    xuio.uio_segflg = UIO_SYSSPACE;
    xuio.uio_iov = beio->xiovecs;
    xuio.uio_iovcnt = beio->num_segs;
    xuio.uio_td = curthread;

    for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
        xiovec->iov_base = beio->sg_segs[i].addr;
        xiovec->iov_len = beio->sg_segs[i].len;
    }

    if (beio->bio_cmd == BIO_READ) {
        vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

        binuptime(&beio->ds_t0);
        devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

        /*
         * UFS pays attention to IO_DIRECT for reads. If the
         * DIRECTIO option is configured into the kernel, it calls
         * ffs_rawread(). But that only works for single-segment
         * uios with user space addresses. In our case, with a
         * kernel uio, it still reads into the buffer cache, but it
         * will just try to release the buffer from the cache later
         * on in ffs_read().
         *
         * ZFS does not pay attention to IO_DIRECT for reads.
         *
         * UFS does not pay attention to IO_SYNC for reads.
         *
         * ZFS pays attention to IO_SYNC (which translates into the
         * Solaris define FRSYNC for zfs_read()) for reads. It
         * attempts to sync the file before reading.
         *
         * So, to attempt to provide some barrier semantics in the
         * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
         */
        error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
                         (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

        VOP_UNLOCK(be_lun->vn, 0);
    } else {
        struct mount *mountpoint;
        int lock_flags;

        (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

        if (MNT_SHARED_WRITES(mountpoint)
         || ((mountpoint == NULL)
          && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
            lock_flags = LK_SHARED;
        else
            lock_flags = LK_EXCLUSIVE;

        vn_lock(be_lun->vn, lock_flags | LK_RETRY);

        binuptime(&beio->ds_t0);
        devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

        /*
         * UFS pays attention to IO_DIRECT for writes. The write
         * is done asynchronously. (Normally the write would just
         * get put into cache.)
         *
         * UFS pays attention to IO_SYNC for writes. It will
         * attempt to write the buffer out synchronously if that
         * flag is set.
         *
         * ZFS does not pay attention to IO_DIRECT for writes.
         *
         * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
         * for writes. It will flush the transaction from the
         * cache before returning.
         *
         * So if we've got the BIO_ORDERED flag set, we want
         * IO_SYNC in either the UFS or ZFS case.
         */
        error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
                          IO_SYNC : 0, file_data->cred);
        VOP_UNLOCK(be_lun->vn, 0);

        vn_finished_write(mountpoint);
    }

    /*
     * If we got an error, set the sense data to "MEDIUM ERROR" and
     * return the I/O to the user.
     */
    if (error != 0) {
        char path_str[32];

        ctl_scsi_path_string(io, path_str, sizeof(path_str));
        /*
         * XXX KDM ZFS returns ENOSPC when the underlying
         * filesystem fills up. What kind of SCSI error should we
         * return for that?
         */
        printf("%s%s command returned errno %d\n", path_str,
               (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
        ctl_set_medium_error(&io->scsiio);
        ctl_complete_beio(beio);
        return;
    }

    /*
     * If this is a write, we're all done.
     * If this is a read, we can now send the data to the user.
     */
    if (beio->bio_cmd == BIO_WRITE) {
        ctl_set_success(&io->scsiio);
        SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
        ctl_complete_beio(beio);
    } else {
        SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
        io->scsiio.be_move_done = ctl_be_block_move_done;
        io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
        io->scsiio.kern_data_len = beio->io_len;
        io->scsiio.kern_total_len = beio->io_len;
        io->scsiio.kern_rel_offset = 0;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_sg_entries = beio->num_segs;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
    }
}

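/*
 * Flush path for device-backed LUNs. SCSI SYNCHRONIZE CACHE commands
 * are routed here by ctl_be_block_cw_dispatch() below and mapped onto a
 * single ordered BIO_FLUSH.
 */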
static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
                       struct ctl_be_block_io *beio)
{
    struct bio *bio;
    union ctl_io *io;
    struct ctl_be_block_devdata *dev_data;

    dev_data = &be_lun->backend.dev;
    io = beio->io;

    DPRINTF("entered\n");

    /* This can't fail, it's a blocking allocation. */
    bio = g_alloc_bio();

    bio->bio_cmd = BIO_FLUSH;
    bio->bio_flags |= BIO_ORDERED;
    bio->bio_dev = dev_data->cdev;
    bio->bio_offset = 0;
    bio->bio_data = 0;
    bio->bio_done = ctl_be_block_biodone;
    bio->bio_caller1 = beio;
    bio->bio_pblkno = 0;

    /*
     * We don't need to acquire the LUN lock here, because we are only
     * sending one bio, and so there is no other context to synchronize
     * with.
     */
    beio->num_bios_sent = 1;
    beio->send_complete = 1;

    binuptime(&beio->ds_t0);
    devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

    (*dev_data->csw->d_strategy)(bio);
}

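/*
 * Read/write path for device-backed LUNs. Each S/G segment (at most
 * MAXPHYS bytes, see ctl_be_block_dispatch()) becomes a single bio
 * unless the device advertises a smaller si_iosize_max; e.g. a 128KB
 * segment on a device with a 64KB limit is split into two bios.
 * send_complete is set under the LUN lock just before the last bio
 * goes out.
 */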
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
                          struct ctl_be_block_io *beio)
{
    int i;
    struct bio *bio;
    struct ctl_be_block_devdata *dev_data;
    off_t cur_offset;
    int max_iosize;

    DPRINTF("entered\n");

    dev_data = &be_lun->backend.dev;

    /*
     * We have to limit our I/O size to the maximum supported by the
     * backend device. Hopefully it is MAXPHYS. If the driver doesn't
     * set it properly, use DFLTPHYS.
     */
    max_iosize = dev_data->cdev->si_iosize_max;
    if (max_iosize < PAGE_SIZE)
        max_iosize = DFLTPHYS;

    cur_offset = beio->io_offset;

    /*
     * XXX KDM need to accurately reflect the number of I/Os outstanding
     * to a device.
     */
    binuptime(&beio->ds_t0);
    devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

    for (i = 0; i < beio->num_segs; i++) {
        size_t cur_size;
        uint8_t *cur_ptr;

        cur_size = beio->sg_segs[i].len;
        cur_ptr = beio->sg_segs[i].addr;

        while (cur_size > 0) {
            /* This can't fail, it's a blocking allocation. */
            bio = g_alloc_bio();

            KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

            bio->bio_cmd = beio->bio_cmd;
            bio->bio_flags |= beio->bio_flags;
            bio->bio_dev = dev_data->cdev;
            bio->bio_caller1 = beio;
            bio->bio_length = min(cur_size, max_iosize);
            bio->bio_offset = cur_offset;
            bio->bio_data = cur_ptr;
            bio->bio_done = ctl_be_block_biodone;
            bio->bio_pblkno = cur_offset / be_lun->blocksize;

            cur_offset += bio->bio_length;
            cur_ptr += bio->bio_length;
            cur_size -= bio->bio_length;

            /*
             * Make sure we set the complete bit just before we
             * issue the last bio so we don't wind up with a
             * race.
             *
             * Use the LUN mutex here instead of a combination
             * of atomic variables for simplicity.
             *
             * XXX KDM we could have a per-IO lock, but that
             * would cause additional per-IO setup and teardown
             * overhead. Hopefully there won't be too much
             * contention on the LUN lock.
             */
            mtx_lock(&be_lun->lock);

            beio->num_bios_sent++;

            if ((i == beio->num_segs - 1)
             && (cur_size == 0))
                beio->send_complete = 1;

            mtx_unlock(&be_lun->lock);

            (*dev_data->csw->d_strategy)(bio);
        }
    }
}

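/*
 * Dispatch path for config writes: commands that change state rather
 * than move user data. Only SYNCHRONIZE CACHE (10 and 16) reaches this
 * backend today; anything else is a frontend/backend mismatch.
 */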
static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
                         union ctl_io *io)
{
    struct ctl_be_block_io *beio;
    struct ctl_be_block_softc *softc;

    DPRINTF("entered\n");

    softc = be_lun->softc;
    beio = ctl_alloc_beio(softc);
    if (beio == NULL) {
        /*
         * This should not happen. ctl_alloc_beio() will call
         * ctl_grow_beio() with a blocking malloc as needed.
         * A malloc with M_WAITOK should not fail.
         */
        ctl_set_busy(&io->scsiio);
        ctl_done(io);
        return;
    }

    beio->io = io;
    beio->softc = softc;
    beio->lun = be_lun;
    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

    switch (io->scsiio.cdb[0]) {
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
        beio->bio_cmd = BIO_FLUSH;
        beio->ds_trans_type = DEVSTAT_NO_DATA;
        beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
        beio->io_len = 0;
        be_lun->lun_flush(be_lun, beio);
        break;
    default:
        panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
        break;
    }
}

SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");

static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
                      union ctl_io *io)
{
    struct ctl_be_block_io *beio;
    struct ctl_be_block_softc *softc;
    struct ctl_lba_len lbalen;
    uint64_t len_left, io_size_bytes;
    int i;

    softc = be_lun->softc;

    DPRINTF("entered\n");

    if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
        SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
    } else {
        SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
    }

    memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
           sizeof(lbalen));

    io_size_bytes = lbalen.len * be_lun->blocksize;

    /*
     * XXX KDM this is temporary, until we implement chaining of beio
     * structures and multiple datamove calls to move all the data in
     * or out.
     */
    if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
        printf("%s: IO length %ju > max io size %u\n", __func__,
               (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
        ctl_set_invalid_field(&io->scsiio,
                              /*sks_valid*/ 0,
                              /*command*/ 1,
                              /*field*/ 0,
                              /*bit_valid*/ 0,
                              /*bit*/ 0);
        ctl_done(io);
        return;
    }

    beio = ctl_alloc_beio(softc);
    if (beio == NULL) {
        /*
         * This should not happen. ctl_alloc_beio() will call
         * ctl_grow_beio() with a blocking malloc as needed.
         * A malloc with M_WAITOK should not fail.
         */
        ctl_set_busy(&io->scsiio);
        ctl_done(io);
        return;
    }

    beio->io = io;
    beio->softc = softc;
    beio->lun = be_lun;
    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

    /*
     * If the I/O came down with an ordered or head of queue tag, set
     * the BIO_ORDERED attribute. For head of queue tags, that's
     * pretty much the best we can do.
     *
     * XXX KDM we don't have a great way to easily know about the FUA
     * bit right now (it is decoded in ctl_read_write(), but we don't
     * pass that knowledge to the backend), and in any case we would
     * need to determine how to handle it.
     */
    if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
     || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
        beio->bio_flags = BIO_ORDERED;

    switch (io->scsiio.tag_type) {
    case CTL_TAG_ORDERED:
        beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
        break;
    case CTL_TAG_HEAD_OF_QUEUE:
        beio->ds_tag_type = DEVSTAT_TAG_HEAD;
        break;
    case CTL_TAG_UNTAGGED:
    case CTL_TAG_SIMPLE:
    case CTL_TAG_ACA:
    default:
        beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
        break;
    }

    /*
     * This path handles read and write only. The config write path
     * handles flush operations.
     */
    if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
        beio->bio_cmd = BIO_READ;
        beio->ds_trans_type = DEVSTAT_READ;
    } else {
        beio->bio_cmd = BIO_WRITE;
        beio->ds_trans_type = DEVSTAT_WRITE;
    }

    beio->io_len = lbalen.len * be_lun->blocksize;
    beio->io_offset = lbalen.lba * be_lun->blocksize;

    DPRINTF("%s at LBA %jx len %u\n",
            (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
            (uintmax_t)lbalen.lba, lbalen.len);

    for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
         len_left > 0; i++) {

        /*
         * Setup the S/G entry for this chunk.
         */
        beio->sg_segs[i].len = min(MAXPHYS, len_left);
        beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

        DPRINTF("segment %d addr %p len %zd\n", i,
                beio->sg_segs[i].addr, beio->sg_segs[i].len);

        beio->num_segs++;
        len_left -= beio->sg_segs[i].len;
    }

    /*
     * For the read case, we need to read the data into our buffers and
     * then we can send it back to the user. For the write case, we
     * need to get the data from the user first.
     */
    if (beio->bio_cmd == BIO_READ) {
        SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
        be_lun->dispatch(be_lun, beio);
    } else {
        SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
        io->scsiio.be_move_done = ctl_be_block_move_done;
        io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
        io->scsiio.kern_data_len = beio->io_len;
        io->scsiio.kern_total_len = beio->io_len;
        io->scsiio.kern_rel_offset = 0;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_sg_entries = beio->num_segs;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
    }
}

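/*
 * Taskqueue handler for a LUN. The three queues are drained in priority
 * order: datamove completions first, then config writes, then freshly
 * submitted I/O, so work already in flight is finished before new work
 * is started.
 */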
static void
ctl_be_block_worker(void *context, int pending)
{
    struct ctl_be_block_lun *be_lun;
    struct ctl_be_block_softc *softc;
    union ctl_io *io;

    be_lun = (struct ctl_be_block_lun *)context;
    softc = be_lun->softc;

    DPRINTF("entered\n");

    mtx_lock(&be_lun->lock);
    for (;;) {
        io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
        if (io != NULL) {
            struct ctl_be_block_io *beio;

            DPRINTF("datamove queue\n");

            STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
                          ctl_io_hdr, links);

            mtx_unlock(&be_lun->lock);

            beio = (struct ctl_be_block_io *)
                io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

            be_lun->dispatch(be_lun, beio);

            mtx_lock(&be_lun->lock);
            continue;
        }
        io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
        if (io != NULL) {

            DPRINTF("config write queue\n");

            STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
                          ctl_io_hdr, links);

            mtx_unlock(&be_lun->lock);

            ctl_be_block_cw_dispatch(be_lun, io);

            mtx_lock(&be_lun->lock);
            continue;
        }
        io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
        if (io != NULL) {
            DPRINTF("input queue\n");

            STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
                          ctl_io_hdr, links);
            mtx_unlock(&be_lun->lock);

            /*
             * We must drop the lock, since this routine and
             * its children may sleep.
             */
            ctl_be_block_dispatch(be_lun, io);

            mtx_lock(&be_lun->lock);
            continue;
        }

        /*
         * If we get here, there is no work left in the queues, so
         * just break out and let the task queue go to sleep.
         */
        break;
    }
    mtx_unlock(&be_lun->lock);
}

/*
 * Entry point from CTL to the backend for I/O. We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
    struct ctl_be_block_lun *be_lun;
    struct ctl_be_lun *ctl_be_lun;
    int retval;

    DPRINTF("entered\n");

    retval = CTL_RETVAL_COMPLETE;

    ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
        CTL_PRIV_BACKEND_LUN].ptr;
    be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

    /*
     * Make sure we only get SCSI I/O.
     */
    KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
            "%#x) encountered", io->io_hdr.io_type));

    mtx_lock(&be_lun->lock);
    /*
     * XXX KDM make sure that links is okay to use at this point.
     * Otherwise, we either need to add another field to ctl_io_hdr,
     * or deal with resource allocation here.
     */
    STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
    mtx_unlock(&be_lun->lock);

    taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

    return (retval);
}

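/*
 * LUN management entry point, normally driven by ctladm(8), e.g.
 * "ctladm create -b block -o file=/dev/zvol/tank/vol0". The sketch
 * below (never compiled) is a rough, simplified illustration of the
 * userland side of such a request; the real ctladm does considerably
 * more argument marshalling, and the exact struct layout is as defined
 * in ctl_ioctl.h.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <cam/ctl/ctl_ioctl.h>

static int
create_block_lun(void)
{
    struct ctl_lun_req req;
    int fd;

    /* CTL exposes its control node as /dev/cam/ctl. */
    fd = open("/dev/cam/ctl", O_RDWR);
    if (fd == -1)
        return (-1);
    memset(&req, 0, sizeof(req));
    strlcpy(req.backend, "block", sizeof(req.backend));
    req.reqtype = CTL_LUNREQ_CREATE;
    /* The "file" backend argument would be attached via req.be_args. */
    return (ioctl(fd, CTL_LUN_REQ, &req));
}
#endif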

static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                   int flag, struct thread *td)
{
    struct ctl_be_block_softc *softc;
    int error;

    softc = &backend_block_softc;

    error = 0;

    switch (cmd) {
    case CTL_LUN_REQ: {
        struct ctl_lun_req *lun_req;

        lun_req = (struct ctl_lun_req *)addr;

        switch (lun_req->reqtype) {
        case CTL_LUNREQ_CREATE:
            error = ctl_be_block_create(softc, lun_req);
            break;
        case CTL_LUNREQ_RM:
            error = ctl_be_block_rm(softc, lun_req);
            break;
        case CTL_LUNREQ_MODIFY:
            error = ctl_be_block_modify(softc, lun_req);
            break;
        default:
            lun_req->status = CTL_LUN_ERROR;
            snprintf(lun_req->error_str, sizeof(lun_req->error_str),
                     "%s: invalid LUN request type %d", __func__,
                     lun_req->reqtype);
            break;
        }
        break;
    }
    default:
        error = ENOTTY;
        break;
    }

    return (error);
}

static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
    struct ctl_be_block_filedata *file_data;
    struct ctl_lun_create_params *params;
    struct vattr vattr;
    int error;

    error = 0;
    file_data = &be_lun->backend.file;
    params = &req->reqdata.create;

    be_lun->dev_type = CTL_BE_BLOCK_FILE;
    be_lun->dispatch = ctl_be_block_dispatch_file;
    be_lun->lun_flush = ctl_be_block_flush_file;

    error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
    if (error != 0) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "error calling VOP_GETATTR() for file %s",
                 be_lun->dev_path);
        return (error);
    }

    /*
     * Verify that we have the ability to upgrade to exclusive
     * access on this file so we can trap errors at open instead
     * of reporting them during first access.
     */
    if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
        vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
        if (be_lun->vn->v_iflag & VI_DOOMED) {
            error = EBADF;
            snprintf(req->error_str, sizeof(req->error_str),
                     "error locking file %s", be_lun->dev_path);
            return (error);
        }
    }

    file_data->cred = crhold(curthread->td_ucred);
    if (params->lun_size_bytes != 0)
        be_lun->size_bytes = params->lun_size_bytes;
    else
        be_lun->size_bytes = vattr.va_size;
    /*
     * We set the multi thread flag for file operations because all
     * filesystems (in theory) are capable of allowing multiple readers
     * of a file at once. So we want to get the maximum possible
     * concurrency.
     */
    be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

    /*
     * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
     * With ZFS, it is 131072 bytes. Block sizes that large don't work
     * with disklabel and UFS on FreeBSD at least. Large block sizes
     * may not work with other OSes as well. So just export a sector
     * size of 512 bytes, which should work with any OS or
     * application. Since our backing is a file, any block size will
     * work fine for the backing store.
     */
#if 0
    be_lun->blocksize = vattr.va_blocksize;
#endif
    if (params->blocksize_bytes != 0)
        be_lun->blocksize = params->blocksize_bytes;
    else
        be_lun->blocksize = 512;

    /*
     * Sanity check. The media size has to be at least one
     * sector long.
     */
    if (be_lun->size_bytes < be_lun->blocksize) {
        error = EINVAL;
        snprintf(req->error_str, sizeof(req->error_str),
                 "file %s size %ju < block size %u", be_lun->dev_path,
                 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
    }
    return (error);
}

static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
    struct ctl_lun_create_params *params;
    struct vattr vattr;
    struct cdev *dev;
    struct cdevsw *devsw;
    int error;

    params = &req->reqdata.create;

    be_lun->dev_type = CTL_BE_BLOCK_DEV;
    be_lun->dispatch = ctl_be_block_dispatch_dev;
    be_lun->lun_flush = ctl_be_block_flush_dev;
    be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
    be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
                                            &be_lun->backend.dev.dev_ref);
    if (be_lun->backend.dev.csw == NULL)
        panic("Unable to retrieve device switch");

    error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
    if (error) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: error getting vnode attributes for device %s",
                 __func__, be_lun->dev_path);
        return (error);
    }

    dev = be_lun->vn->v_rdev;
    devsw = dev->si_devsw;
    if (!devsw->d_ioctl) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: no d_ioctl for device %s!", __func__,
                 be_lun->dev_path);
        return (ENODEV);
    }

    error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
                           (caddr_t)&be_lun->blocksize, FREAD,
                           curthread);
    if (error) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
                 "on %s!", __func__, error, be_lun->dev_path);
        return (error);
    }

    /*
     * If the user has asked for a blocksize that is greater than the
     * backing device's blocksize, we can do it only if the blocksize
     * the user is asking for is an even multiple of the underlying
     * device's blocksize.
     */
    if ((params->blocksize_bytes != 0)
     && (params->blocksize_bytes > be_lun->blocksize)) {
        uint32_t bs_multiple, tmp_blocksize;

        bs_multiple = params->blocksize_bytes / be_lun->blocksize;

        tmp_blocksize = bs_multiple * be_lun->blocksize;

        if (tmp_blocksize == params->blocksize_bytes) {
            be_lun->blocksize = params->blocksize_bytes;
        } else {
            snprintf(req->error_str, sizeof(req->error_str),
                     "%s: requested blocksize %u is not an even "
                     "multiple of backing device blocksize %u",
                     __func__, params->blocksize_bytes,
                     be_lun->blocksize);
            return (EINVAL);
        }
    } else if ((params->blocksize_bytes != 0)
            && (params->blocksize_bytes != be_lun->blocksize)) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: requested blocksize %u < backing device "
                 "blocksize %u", __func__, params->blocksize_bytes,
                 be_lun->blocksize);
        return (EINVAL);
    }
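    /*
     * For example, a requested blocksize of 4096 on a 512-byte device
     * gives bs_multiple = 8 and tmp_blocksize = 4096, so it is
     * accepted; a request of 1000 reconstructs to 512 and is rejected
     * with EINVAL above.
     */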

    error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
                           (caddr_t)&be_lun->size_bytes, FREAD,
                           curthread);
    if (error) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
                 "on %s!", __func__, error, be_lun->dev_path);
        return (error);
    }

    if (params->lun_size_bytes != 0) {
        if (params->lun_size_bytes > be_lun->size_bytes) {
            snprintf(req->error_str, sizeof(req->error_str),
                     "%s: requested LUN size %ju > backing device "
                     "size %ju", __func__,
                     (uintmax_t)params->lun_size_bytes,
                     (uintmax_t)be_lun->size_bytes);
            return (EINVAL);
        }

        be_lun->size_bytes = params->lun_size_bytes;
    }

    return (0);
}

static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
    DROP_GIANT();
    if (be_lun->vn) {
        int flags = FREAD | FWRITE;

        switch (be_lun->dev_type) {
        case CTL_BE_BLOCK_DEV:
            if (be_lun->backend.dev.csw) {
                dev_relthread(be_lun->backend.dev.cdev,
                              be_lun->backend.dev.dev_ref);
                be_lun->backend.dev.csw = NULL;
                be_lun->backend.dev.cdev = NULL;
            }
            break;
        case CTL_BE_BLOCK_FILE:
            break;
        case CTL_BE_BLOCK_NONE:
            break;
        default:
            panic("Unexpected backend type.");
            break;
        }

        (void)vn_close(be_lun->vn, flags, NOCRED, curthread);
        be_lun->vn = NULL;

        switch (be_lun->dev_type) {
        case CTL_BE_BLOCK_DEV:
            break;
        case CTL_BE_BLOCK_FILE:
            if (be_lun->backend.file.cred != NULL) {
                crfree(be_lun->backend.file.cred);
                be_lun->backend.file.cred = NULL;
            }
            break;
        case CTL_BE_BLOCK_NONE:
            break;
        default:
            panic("Unexpected backend type.");
            break;
        }
    }
    PICKUP_GIANT();

    return (0);
}

static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
                  struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
    struct nameidata nd;
    int flags;
    int error;

    /*
     * XXX KDM allow a read-only option?
     */
    flags = FREAD | FWRITE;
    error = 0;

    if (rootvnode == NULL) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: Root filesystem is not mounted", __func__);
        return (1);
    }

    if (!curthread->td_proc->p_fd->fd_cdir) {
        curthread->td_proc->p_fd->fd_cdir = rootvnode;
        VREF(rootvnode);
    }
    if (!curthread->td_proc->p_fd->fd_rdir) {
        curthread->td_proc->p_fd->fd_rdir = rootvnode;
        VREF(rootvnode);
    }
    if (!curthread->td_proc->p_fd->fd_jdir) {
        curthread->td_proc->p_fd->fd_jdir = rootvnode;
        VREF(rootvnode);
    }

again:
    NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
    error = vn_open(&nd, &flags, 0, NULL);
    if (error) {
        /*
         * This is the only reasonable guess we can make as far as
         * path if the user doesn't give us a fully qualified path.
         * If they want to specify a file, they need to specify the
         * full path.
         */
        if (be_lun->dev_path[0] != '/') {
            char *dev_path = "/dev/";
            char *dev_name;

            /* Try adding device path at beginning of name */
            dev_name = malloc(strlen(be_lun->dev_path)
                            + strlen(dev_path) + 1,
                              M_CTLBLK, M_WAITOK);
            if (dev_name) {
                sprintf(dev_name, "%s%s", dev_path,
                        be_lun->dev_path);
                free(be_lun->dev_path, M_CTLBLK);
                be_lun->dev_path = dev_name;
                goto again;
            }
        }
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: error opening %s", __func__, be_lun->dev_path);
        return (error);
    }

    NDFREE(&nd, NDF_ONLY_PNBUF);

    be_lun->vn = nd.ni_vp;

    /* We only support disks and files. */
    if (vn_isdisk(be_lun->vn, &error)) {
        error = ctl_be_block_open_dev(be_lun, req);
    } else if (be_lun->vn->v_type == VREG) {
        error = ctl_be_block_open_file(be_lun, req);
    } else {
        error = EINVAL;
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s is not a disk or plain file", be_lun->dev_path);
    }
    VOP_UNLOCK(be_lun->vn, 0);

    if (error != 0) {
        ctl_be_block_close(be_lun);
        return (error);
    }

    be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
    be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

    return (0);
}

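/*
 * A note on the size math above: blocksize_shift = fls(blocksize) - 1
 * assumes a power-of-2 blocksize (e.g. 512 gives a shift of 9, so
 * size_blocks = size_bytes >> 9). A non-power-of-2 "even multiple"
 * blocksize, which ctl_be_block_open_dev() would accept, would make
 * size_blocks inaccurate.
 */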

static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
    struct ctl_be_block_lun *be_lun;
    struct ctl_lun_create_params *params;
    struct ctl_be_arg *file_arg;
    char tmpstr[32];
    int retval, num_threads;
    int i;

    params = &req->reqdata.create;
    retval = 0;

    num_threads = cbb_num_threads;

    file_arg = NULL;

    be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

    be_lun->softc = softc;
    STAILQ_INIT(&be_lun->input_queue);
    STAILQ_INIT(&be_lun->config_write_queue);
    STAILQ_INIT(&be_lun->datamove_queue);
    STAILQ_INIT(&be_lun->ctl_be_lun.options);
    sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
    mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

    be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
                                   NULL, NULL, NULL, NULL,
                                   /*align*/ 0, /*flags*/ 0);

    if (be_lun->lun_zone == NULL) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: error allocating UMA zone", __func__);
        goto bailout_error;
    }

    if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
        be_lun->ctl_be_lun.lun_type = params->device_type;
    else
        be_lun->ctl_be_lun.lun_type = T_DIRECT;

    if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
        for (i = 0; i < req->num_be_args; i++) {
            if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
                file_arg = &req->kern_be_args[i];
                break;
            }
        }

        if (file_arg == NULL) {
            snprintf(req->error_str, sizeof(req->error_str),
                     "%s: no file argument specified", __func__);
            goto bailout_error;
        }

        be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
                                  M_WAITOK | M_ZERO);

        strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
                file_arg->vallen);

        retval = ctl_be_block_open(softc, be_lun, req);
        if (retval != 0) {
            retval = 0;
            goto bailout_error;
        }

        /*
         * Tell the user the size of the file/device.
         */
        params->lun_size_bytes = be_lun->size_bytes;

        /*
         * The maximum LBA is the size - 1.
         */
        be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
    } else {
        /*
         * For processor devices, we don't have any size.
         */
        be_lun->blocksize = 0;
        be_lun->size_blocks = 0;
        be_lun->size_bytes = 0;
        be_lun->ctl_be_lun.maxlba = 0;
        params->lun_size_bytes = 0;

        /*
         * Default to just 1 thread for processor devices.
         */
        num_threads = 1;
    }

    /*
     * XXX This searching loop might be refactored to be combined with
     * the loop above.
     */
    for (i = 0; i < req->num_be_args; i++) {
        if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
            struct ctl_be_arg *thread_arg;
            char num_thread_str[16];
            int tmp_num_threads;

            thread_arg = &req->kern_be_args[i];

            strlcpy(num_thread_str, (char *)thread_arg->kvalue,
                    min(thread_arg->vallen,
                        sizeof(num_thread_str)));

            tmp_num_threads = strtol(num_thread_str, NULL, 0);

            /*
             * We don't let the user specify less than one
             * thread, but hope he's clueful enough not to
             * specify 1000 threads.
             */
            if (tmp_num_threads < 1) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: invalid number of threads %s",
                         __func__, num_thread_str);
                goto bailout_error;
            }

            num_threads = tmp_num_threads;
        } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 &&
                   strcmp(req->kern_be_args[i].kname, "dev") != 0) {
            struct ctl_be_lun_option *opt;

            opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK);
            opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1,
                               M_CTLBLK, M_WAITOK);
            strcpy(opt->name, req->kern_be_args[i].kname);
            opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1,
                                M_CTLBLK, M_WAITOK);
            strcpy(opt->value, req->kern_be_args[i].kvalue);
            STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt,
                               links);
        }
    }
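    /*
     * Any backend argument other than "file", "dev" and "num_threads"
     * is carried over verbatim as a generic CTL LUN option above; the
     * block backend itself does not interpret those pairs.
     */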

    be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
    be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
    be_lun->ctl_be_lun.be_lun = be_lun;
    be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
    /* Tell the user the blocksize we ended up using */
    params->blocksize_bytes = be_lun->blocksize;
    if (params->flags & CTL_LUN_FLAG_ID_REQ) {
        be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
        be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
    } else
        be_lun->ctl_be_lun.req_lun_id = 0;

    be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
    be_lun->ctl_be_lun.lun_config_status =
        ctl_be_block_lun_config_status;
    be_lun->ctl_be_lun.be = &ctl_be_block_driver;

    if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
        snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                 softc->num_luns);
        strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
                ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                        sizeof(tmpstr)));

        /* Tell the user what we used for a serial number */
        strncpy((char *)params->serial_num, tmpstr,
                ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
    } else {
        strncpy((char *)be_lun->ctl_be_lun.serial_num,
                params->serial_num,
                ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                        sizeof(params->serial_num)));
    }
    if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
        snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
        strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
                ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                        sizeof(tmpstr)));

        /* Tell the user what we used for a device ID */
        strncpy((char *)params->device_id, tmpstr,
                ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
    } else {
        strncpy((char *)be_lun->ctl_be_lun.device_id,
                params->device_id,
                ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                        sizeof(params->device_id)));
    }

    TASK_INIT(&be_lun->io_task, /*priority*/ 0, ctl_be_block_worker,
              be_lun);

    be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
        taskqueue_thread_enqueue, /*context*/ &be_lun->io_taskqueue);

    if (be_lun->io_taskqueue == NULL) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: Unable to create taskqueue", __func__);
        goto bailout_error;
    }

    /*
     * Note that we start the same number of threads by default for
     * both the file case and the block device case. For the file
     * case, we need multiple threads to allow concurrency, because the
     * vnode interface is designed to be a blocking interface. For the
     * block device case, ZFS zvols at least will block the caller's
     * context in many instances, and so we need multiple threads to
     * overcome that problem. Other block devices don't need as many
     * threads, but they shouldn't cause too many problems.
     *
     * If the user wants to just have a single thread for a block
     * device, he can specify that when the LUN is created, or change
     * the tunable/sysctl to alter the default number of threads.
     */
    retval = taskqueue_start_threads(&be_lun->io_taskqueue,
                                     /*num threads*/ num_threads,
                                     /*priority*/ PWAIT,
                                     /*thread name*/
                                     "%s taskq", be_lun->lunname);

    if (retval != 0)
        goto bailout_error;

    be_lun->num_threads = num_threads;
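    /*
     * The per-LUN thread count can be set at creation time with the
     * "num_threads" backend argument parsed above, e.g. (illustrative)
     * "ctladm create -b block -o file=... -o num_threads=4".
     */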

    mtx_lock(&softc->lock);
    softc->num_luns++;
    STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

    mtx_unlock(&softc->lock);

    retval = ctl_add_lun(&be_lun->ctl_be_lun);
    if (retval != 0) {
        mtx_lock(&softc->lock);
        STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
                      links);
        softc->num_luns--;
        mtx_unlock(&softc->lock);
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: ctl_add_lun() returned error %d, see dmesg for "
                 "details", __func__, retval);
        retval = 0;
        goto bailout_error;
    }

    mtx_lock(&softc->lock);

    /*
     * Tell the config_status routine that we're waiting so it won't
     * clean up the LUN in the event of an error.
     */
    be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

    while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
        retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
        if (retval == EINTR)
            break;
    }
    be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

    if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
        snprintf(req->error_str, sizeof(req->error_str),
                 "%s: LUN configuration error, see dmesg for details",
                 __func__);
        STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
                      links);
        softc->num_luns--;
        mtx_unlock(&softc->lock);
        goto bailout_error;
    } else {
        params->req_lun_id = be_lun->ctl_be_lun.lun_id;
    }

    mtx_unlock(&softc->lock);

    be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
                                           be_lun->blocksize,
                                           DEVSTAT_ALL_SUPPORTED,
                                           be_lun->ctl_be_lun.lun_type
                                           | DEVSTAT_TYPE_IF_OTHER,
                                           DEVSTAT_PRIORITY_OTHER);

    req->status = CTL_LUN_OK;

    return (retval);

bailout_error:
    req->status = CTL_LUN_ERROR;

    ctl_be_block_close(be_lun);

    free(be_lun->dev_path, M_CTLBLK);
    free(be_lun, M_CTLBLK);

    return (retval);
}

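/*
 * Tear-down path: the LUN is disabled and invalidated first, and we then
 * wait for CTL to report it unconfigured before freeing any resources.
 */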
1936 "%s: error %d returned from ctl_disable_lun() for " 1937 "LUN %d", __func__, retval, params->lun_id); 1938 goto bailout_error; 1939 1940 } 1941 1942 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 1943 if (retval != 0) { 1944 snprintf(req->error_str, sizeof(req->error_str), 1945 "%s: error %d returned from ctl_invalidate_lun() for " 1946 "LUN %d", __func__, retval, params->lun_id); 1947 goto bailout_error; 1948 } 1949 1950 mtx_lock(&softc->lock); 1951 1952 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 1953 1954 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 1955 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 1956 if (retval == EINTR) 1957 break; 1958 } 1959 1960 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 1961 1962 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 1963 snprintf(req->error_str, sizeof(req->error_str), 1964 "%s: interrupted waiting for LUN to be freed", 1965 __func__); 1966 mtx_unlock(&softc->lock); 1967 goto bailout_error; 1968 } 1969 1970 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 1971 1972 softc->num_luns--; 1973 mtx_unlock(&softc->lock); 1974 1975 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 1976 1977 taskqueue_free(be_lun->io_taskqueue); 1978 1979 ctl_be_block_close(be_lun); 1980 1981 if (be_lun->disk_stats != NULL) 1982 devstat_remove_entry(be_lun->disk_stats); 1983 1984 uma_zdestroy(be_lun->lun_zone); 1985 1986 free(be_lun->dev_path, M_CTLBLK); 1987 1988 free(be_lun, M_CTLBLK); 1989 1990 req->status = CTL_LUN_OK; 1991 1992 return (0); 1993 1994 bailout_error: 1995 1996 req->status = CTL_LUN_ERROR; 1997 1998 return (0); 1999 } 2000 2001 static int 2002 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2003 struct ctl_lun_req *req) 2004 { 2005 struct vattr vattr; 2006 int error; 2007 struct ctl_lun_modify_params *params; 2008 2009 params = &req->reqdata.modify; 2010 2011 if (params->lun_size_bytes != 0) { 2012 be_lun->size_bytes = params->lun_size_bytes; 2013 } else { 2014 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2015 if (error != 0) { 2016 snprintf(req->error_str, sizeof(req->error_str), 2017 "error calling VOP_GETATTR() for file %s", 2018 be_lun->dev_path); 2019 return (error); 2020 } 2021 2022 be_lun->size_bytes = vattr.va_size; 2023 } 2024 2025 return (0); 2026 } 2027 2028 static int 2029 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2030 struct ctl_lun_req *req) 2031 { 2032 struct cdev *dev; 2033 struct cdevsw *devsw; 2034 int error; 2035 struct ctl_lun_modify_params *params; 2036 uint64_t size_bytes; 2037 2038 params = &req->reqdata.modify; 2039 2040 dev = be_lun->vn->v_rdev; 2041 devsw = dev->si_devsw; 2042 if (!devsw->d_ioctl) { 2043 snprintf(req->error_str, sizeof(req->error_str), 2044 "%s: no d_ioctl for device %s!", __func__, 2045 be_lun->dev_path); 2046 return (ENODEV); 2047 } 2048 2049 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2050 (caddr_t)&size_bytes, FREAD, 2051 curthread); 2052 if (error) { 2053 snprintf(req->error_str, sizeof(req->error_str), 2054 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2055 "on %s!", __func__, error, be_lun->dev_path); 2056 return (error); 2057 } 2058 2059 if (params->lun_size_bytes != 0) { 2060 if (params->lun_size_bytes > size_bytes) { 2061 snprintf(req->error_str, sizeof(req->error_str), 2062 "%s: requested LUN size %ju > backing device " 2063 "size %ju", __func__, 2064 (uintmax_t)params->lun_size_bytes, 2065 (uintmax_t)size_bytes); 2066 return (EINVAL); 2067 } 2068 2069 be_lun->size_bytes = 
static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}

static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	int error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 be_lun->blocksize);
			goto bailout_error;
		}
	}

	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0)
		goto bailout_error;

	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 *	which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
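
/*
 * The two callbacks below implement the backend half of the configuration
 * handshake: CTL invokes them when a LUN has shut down or its initial
 * configuration has completed.  They flip CTL_BE_BLOCK_LUN_UNCONFIGURED
 * and wake any thread sleeping in the create or remove path with
 * CTL_BE_BLOCK_LUN_WAITING set.
 */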
static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;

	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}

static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}

static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_be_block_config_read(union ctl_io *io)
{
	/*
	 * No configuration-read commands currently require backend
	 * involvement.
	 */
	return (0);
}
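
/*
 * Emit this LUN's backend-specific status as an XML fragment.  For a
 * file- or device-backed LUN the output looks roughly like this
 * (values illustrative):
 *
 *   <num_threads>14</num_threads><file>/dev/zvol/tank/vol0</file>
 */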
static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "<num_threads>");

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>");

	/*
	 * For processor devices, we don't have a path variable.
	 */
	if ((retval != 0)
	 || (lun->dev_path == NULL))
		goto bailout;

	retval = sbuf_printf(sb, "<file>");

	if (retval != 0)
		goto bailout;

	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);

	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</file>\n");

bailout:
	return (retval);
}

int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->beio_free_queue);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);
	/* Preallocate a pool of per-I/O (beio) structures. */
	ctl_grow_beio(softc, 200);

	return (retval);
}
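
/*
 * Illustrative only, assuming the ctladm(8) syntax of this era: the
 * create/remove/modify handlers above are driven from userland through
 * CTL LUN requests, e.g.:
 *
 *   ctladm create -b block -o file=/tank/lunfile
 *   ctladm modify -b block -l 0 -s 4294967296
 *   ctladm remove -b block -l 0
 */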