/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>
#include <sys/sx.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

/*
 * The idea here is that we'll allocate enough S/G space to hold a 1MB
 * I/O.  If we get an I/O larger than that, we'll split it.
 */
#define	CTLBLK_HALF_IO_SIZE	(512 * 1024)
#define	CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
#define	CTLBLK_MIN_SEG		(128 * 1024)
#define	CTLBLK_MAX_SEG		MIN(CTLBLK_HALF_IO_SIZE, maxphys)
#define	CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1)
#define	CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)
#define	CTLBLK_NUM_SEGS		(CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG)

#ifdef CTLBLK_DEBUG
#define	DPRINTF(fmt, args...) \
	printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define	DPRINTF(fmt, args...)	do {} while (0)
#endif

#define	PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define	ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

SDT_PROVIDER_DEFINE(cbb);
typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_filedata file;
};

struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun,
    const char *attrname);

/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_be_lun cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	cbb_dispatch_t unmap;
	cbb_dispatch_t get_lba_status;
	cbb_getattr_t getattr;
	uint64_t size_blocks;
	uint64_t size_bytes;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	SLIST_ENTRY(ctl_be_block_lun) links;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_read_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
	struct mtx_padalign io_lock;
	struct mtx_padalign queue_lock;
};

/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	struct sx modify_lock;
	struct mtx lock;
	int num_luns;
	SLIST_HEAD(, ctl_be_block_lun) lun_list;
	uma_zone_t beio_zone;
	uma_zone_t bufmin_zone;
	uma_zone_t bufmax_zone;
};

static struct ctl_be_block_softc backend_block_softc;

/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io *io;
	struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
	struct iovec xiovecs[CTLBLK_MAX_SEGS];
	int refcnt;
	int bio_cmd;
	int two_sglists;
	int num_segs;
	int num_bios_sent;
	int num_bios_done;
	int send_complete;
	int first_error;
	uint64_t first_error_offset;
	struct bintime ds_t0;
	devstat_tag_type ds_tag_type;
	devstat_trans_flags ds_trans_type;
	uint64_t io_len;
	uint64_t io_offset;
	int io_arg;
	struct ctl_be_block_softc *softc;
	struct ctl_be_block_lun *lun;
	void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
};

extern struct ctl_softc *control_softc;

static int cbb_num_threads = 32;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN,
    &cbb_num_threads, 0, "Number of threads per backing file");

static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io, bool samethr);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun,
    const char *attrname);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio);
static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun,
    const char *attrname);
static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
    struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_lun *be_lun,
    struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
    struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
    struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
    struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb);
static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun,
    const char *attrname);
static int ctl_be_block_init(void);
static int ctl_be_block_shutdown(void);

static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.shutdown = ctl_be_block_shutdown,
	.data_submit = ctl_be_block_submit,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info,
	.lun_attr = ctl_be_block_lun_attr
};

MALLOC_DEFINE(M_CTLBLK, "ctlblock", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
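
/*
 * Allocate a data buffer for one S/G segment from the appropriately
 * sized UMA zone: bufmin for segments up to CTLBLK_MIN_SEG, bufmax
 * for anything larger (up to CTLBLK_MAX_SEG).
 */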
static void
ctl_alloc_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg,
    size_t len)
{

	if (len <= CTLBLK_MIN_SEG) {
		sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK);
	} else {
		KASSERT(len <= CTLBLK_MAX_SEG,
		    ("Too large alloc %zu > %lu", len, CTLBLK_MAX_SEG));
		sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK);
	}
	sg->len = len;
}

static void
ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg)
{

	if (sg->len <= CTLBLK_MIN_SEG) {
		uma_zfree(softc->bufmin_zone, sg->addr);
	} else {
		KASSERT(sg->len <= CTLBLK_MAX_SEG,
		    ("Too large free %zu > %lu", sg->len, CTLBLK_MAX_SEG));
		uma_zfree(softc->bufmax_zone, sg->addr);
	}
}

static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;

	beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO);
	beio->softc = softc;
	beio->refcnt = 1;
	return (beio);
}
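
/*
 * Actually release a beio and the S/G segment buffers it owns.  Called
 * once the reference count has dropped to zero.
 */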
static void
ctl_real_free_beio(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_softc *softc = beio->softc;
	int i;

	for (i = 0; i < beio->num_segs; i++) {
		ctl_free_seg(softc, &beio->sg_segs[i]);

		/* For compare we had two equal S/G lists. */
		if (beio->two_sglists) {
			ctl_free_seg(softc,
			    &beio->sg_segs[i + CTLBLK_HALF_SEGS]);
		}
	}

	uma_zfree(softc->beio_zone, beio);
}

static void
ctl_refcnt_beio(void *arg, int diff)
{
	struct ctl_be_block_io *beio = arg;

	if (atomic_fetchadd_int(&beio->refcnt, diff) + diff == 0)
		ctl_real_free_beio(beio);
}

static void
ctl_free_beio(struct ctl_be_block_io *beio)
{

	ctl_refcnt_beio(beio, -1);
}

static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;

	if (beio->beio_cont != NULL) {
		beio->beio_cont(beio);
	} else {
		ctl_free_beio(beio);
		ctl_data_submit_done(io);
	}
}

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

static void
ctl_be_block_compare(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	uint64_t off, res;
	int i;
	uint8_t info[8];

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	off = 0;
	for (i = 0; i < beio->num_segs; i++) {
		res = cmp(beio->sg_segs[i].addr,
		    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
		    beio->sg_segs[i].len);
		off += res;
		if (res < beio->sg_segs[i].len)
			break;
	}
	if (i < beio->num_segs) {
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
	} else
		ctl_set_success(&io->scsiio);
}

static int
ctl_be_block_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	struct ctl_lba_len_flags *lbalen;

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;

	DPRINTF("entered\n");
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;

	/*
	 * We set status at this point for read and compare commands.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		lbalen = ARGS(io);
		if (lbalen->flags & CTL_LLF_READ) {
			ctl_set_success(&io->scsiio);
		} else if (lbalen->flags & CTL_LLF_COMPARE) {
			/* We have two data blocks ready for comparison. */
			ctl_be_block_compare(io);
		}
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed successfully.
	 * If we were called synchronously in the original thread then just
	 * dispatch, otherwise we now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
	if (samethr) {
		be_lun->dispatch(be_lun, beio);
	} else {
		mtx_lock(&be_lun->queue_lock);
		STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
		mtx_unlock(&be_lun->queue_lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
	}
	return (0);
}
static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio = bio->bio_caller1;
	struct ctl_be_block_lun *be_lun = beio->lun;
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	union ctl_io *io;
	int error;

	io = beio->io;

	DPRINTF("entered\n");

	error = bio->bio_error;
	mtx_lock(&be_lun->io_lock);
	if (error != 0 &&
	    (beio->first_error == 0 ||
	     bio->bio_offset < beio->first_error_offset)) {
		beio->first_error = error;
		beio->first_error_offset = bio->bio_offset;
	}

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->io_lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/ &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	error = beio->first_error;
	if (error != 0) {
		if (error == EOPNOTSUPP) {
			ctl_set_invalid_opcode(&io->scsiio);
		} else if (error == ENOSPC || error == EDQUOT) {
			ctl_set_space_alloc_fail(&io->scsiio);
		} else if (error == EROFS || error == EACCES) {
			ctl_set_hw_write_protected(&io->scsiio);
		} else if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
			    /*sks_valid*/ 1,
			    /*retry_count*/ 0xbad2);
		} else {
			ctl_set_medium_error(&io->scsiio,
			    beio->bio_cmd == BIO_READ);
		}
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, a flush, a delete or verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)
	 || (beio->bio_cmd == BIO_DELETE)
	 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		if ((ARGS(io)->flags & CTL_LLF_READ) &&
		    beio->beio_cont == NULL) {
			ctl_set_success(&io->scsiio);
			if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
				ctl_serseq_done(io);
		}
		ctl_datamove(io);
	}
}
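
/*
 * Flush a file-backed LUN to stable storage with VOP_FSYNC(), either
 * waiting for completion or not depending on whether the initiator
 * requested immediate return (beio->io_arg).
 */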
static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;
	struct mount *mountpoint;
	int error;

	DPRINTF("entered\n");

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	vn_lock(be_lun->vn, vn_lktype_write(mountpoint, be_lun->vn) |
	    LK_RETRY);
	error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT,
	    curthread);
	VOP_UNLOCK(be_lun->vn);

	vn_finished_write(mountpoint);

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/ &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}

SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t");
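
/*
 * Handle read/write I/O for a file-backed LUN: build a kernel uio over
 * the beio's S/G segments and hand it to VOP_READ()/VOP_WRITE().
 */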
static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	size_t s;
	int error, flags, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = 0;
	if (ARGS(io)->flags & CTL_LLF_DPO)
		flags |= IO_DIRECT;
	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
		flags |= IO_SYNC;

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE0(cbb, , read, file_start);
		xuio.uio_rw = UIO_READ;
	} else {
		SDT_PROBE0(cbb, , write, file_start);
		xuio.uio_rw = UIO_WRITE;
	}
	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		if (beio->beio_cont == NULL &&
		    cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT)
			ctl_serseq_done(io);
		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 */
		error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);

		VOP_UNLOCK(be_lun->vn);
		SDT_PROBE0(cbb, , read, file_done);
		if (error == 0 && xuio.uio_resid > 0) {
			/*
			 * If we read less than requested (EOF), then
			 * we should zero the rest of the buffer.
			 */
			s = beio->io_len - xuio.uio_resid;
			for (i = 0; i < beio->num_segs; i++) {
				if (s >= beio->sg_segs[i].len) {
					s -= beio->sg_segs[i].len;
					continue;
				}
				bzero((uint8_t *)beio->sg_segs[i].addr + s,
				    beio->sg_segs[i].len - s);
				s = 0;
			}
		}
	} else {
		struct mount *mountpoint;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
		vn_lock(be_lun->vn, vn_lktype_write(mountpoint,
		    be_lun->vn) | LK_RETRY);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into the cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
		VOP_UNLOCK(be_lun->vn);

		vn_finished_write(mountpoint);
		SDT_PROBE0(cbb, , write, file_done);
	}

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/ &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		if (error == ENOSPC || error == EDQUOT) {
			ctl_set_space_alloc_fail(&io->scsiio);
		} else if (error == EROFS || error == EACCES) {
			ctl_set_hw_write_protected(&io->scsiio);
		} else {
			ctl_set_medium_error(&io->scsiio,
			    beio->bio_cmd == BIO_READ);
		}
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE) ||
	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		if ((ARGS(io)->flags & CTL_LLF_READ) &&
		    beio->beio_cont == NULL) {
			ctl_set_success(&io->scsiio);
			if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
				ctl_serseq_done(io);
		}
		ctl_datamove(io);
	}
}
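
/*
 * Implement GET LBA STATUS for a file-backed LUN using the
 * FIOSEEKHOLE/FIOSEEKDATA ioctls to discover mapped and deallocated
 * ranges.
 */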
static void
ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	struct scsi_get_lba_status_data *data;
	off_t roff, off;
	int error, status;

	DPRINTF("entered\n");

	off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
	error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off,
	    0, curthread->td_ucred, curthread);
	if (error == 0 && off > roff)
		status = 0;	/* mapped up to off */
	else {
		error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off,
		    0, curthread->td_ucred, curthread);
		if (error == 0 && off > roff)
			status = 1;	/* deallocated up to off */
		else {
			status = 0;	/* unknown up to the end */
			off = be_lun->size_bytes;
		}
	}
	VOP_UNLOCK(be_lun->vn);

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
	scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
	    lbalen->lba), data->descr[0].length);
	data->descr[0].status = status;

	ctl_complete_beio(beio);
}

static uint64_t
ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname)
{
	struct vattr vattr;
	struct statfs statfs;
	uint64_t val;
	int error;

	val = UINT64_MAX;
	if (be_lun->vn == NULL)
		return (val);
	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
	if (strcmp(attrname, "blocksused") == 0) {
		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
		if (error == 0)
			val = vattr.va_bytes / be_lun->cbe_lun.blocksize;
	}
	if (strcmp(attrname, "blocksavail") == 0 &&
	    !VN_IS_DOOMED(be_lun->vn)) {
		error = VFS_STATFS(be_lun->vn->v_mount, &statfs);
		if (error == 0)
			val = statfs.f_bavail * statfs.f_bsize /
			    be_lun->cbe_lun.blocksize;
	}
	VOP_UNLOCK(be_lun->vn);
	return (val);
}
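
/*
 * Handle read/write I/O for a ZVOL-backed LUN.  ZVOLs expose cdevsw
 * read/write entry points, so we can build a kernel uio and call
 * d_read()/d_write() directly instead of constructing bios.
 */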
static void
ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	union ctl_io *io;
	struct cdevsw *csw;
	struct cdev *dev;
	struct uio xuio;
	struct iovec *xiovec;
	int error, flags, i, ref;

	DPRINTF("entered\n");

	io = beio->io;
	flags = 0;
	if (ARGS(io)->flags & CTL_LLF_DPO)
		flags |= IO_DIRECT;
	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
		flags |= IO_SYNC;

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE0(cbb, , read, file_start);
		xuio.uio_rw = UIO_READ;
	} else {
		SDT_PROBE0(cbb, , write, file_start);
		xuio.uio_rw = UIO_WRITE;
	}
	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	if (csw) {
		if (beio->bio_cmd == BIO_READ) {
			if (beio->beio_cont == NULL &&
			    cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT)
				ctl_serseq_done(io);
			error = csw->d_read(dev, &xuio, flags);
		} else
			error = csw->d_write(dev, &xuio, flags);
		dev_relthread(dev, ref);
	} else
		error = ENXIO;

	if (beio->bio_cmd == BIO_READ)
		SDT_PROBE0(cbb, , read, file_done);
	else
		SDT_PROBE0(cbb, , write, file_done);

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/ &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		if (error == ENOSPC || error == EDQUOT) {
			ctl_set_space_alloc_fail(&io->scsiio);
		} else if (error == EROFS || error == EACCES) {
			ctl_set_hw_write_protected(&io->scsiio);
		} else {
			ctl_set_medium_error(&io->scsiio,
			    beio->bio_cmd == BIO_READ);
		}
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE) ||
	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		if ((ARGS(io)->flags & CTL_LLF_READ) &&
		    beio->beio_cont == NULL) {
			ctl_set_success(&io->scsiio);
			if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
				ctl_serseq_done(io);
		}
		ctl_datamove(io);
	}
}
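
/*
 * Implement GET LBA STATUS for a ZVOL-backed LUN, using the device's
 * FIOSEEKHOLE/FIOSEEKDATA ioctls.
 */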
static void
ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;
	struct cdevsw *csw;
	struct cdev *dev;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	struct scsi_get_lba_status_data *data;
	off_t roff, off;
	int error, ref, status;

	DPRINTF("entered\n");

	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	if (csw == NULL) {
		status = 0;	/* unknown up to the end */
		off = be_lun->size_bytes;
		goto done;
	}
	off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
	error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD,
	    curthread);
	if (error == 0 && off > roff)
		status = 0;	/* mapped up to off */
	else {
		error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD,
		    curthread);
		if (error == 0 && off > roff)
			status = 1;	/* deallocated up to off */
		else {
			status = 0;	/* unknown up to the end */
			off = be_lun->size_bytes;
		}
	}
	dev_relthread(dev, ref);

done:
	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
	scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
	    lbalen->lba), data->descr[0].length);
	data->descr[0].status = status;

	ctl_complete_beio(beio);
}

static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	struct bio *bio;
	struct cdevsw *csw;
	struct cdev *dev;
	int ref;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd = BIO_FLUSH;
	bio->bio_offset = 0;
	bio->bio_data = 0;
	bio->bio_done = ctl_be_block_biodone;
	bio->bio_caller1 = beio;
	bio->bio_pblkno = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	if (csw) {
		bio->bio_dev = dev;
		csw->d_strategy(bio);
		dev_relthread(dev, ref);
	} else {
		bio->bio_error = ENXIO;
		ctl_be_block_biodone(bio);
	}
}
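
/*
 * Issue BIO_DELETE bios covering one contiguous byte range, splitting
 * it into chunks no longer than the maximum bio length.  The beio's
 * bio accounting tracks when the last bio completes.
 */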
static void
ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio, uint64_t off, uint64_t len, int last)
{
	struct bio *bio;
	uint64_t maxlen;
	struct cdevsw *csw;
	struct cdev *dev;
	int ref;

	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize);
	while (len > 0) {
		bio = g_alloc_bio();
		bio->bio_cmd = BIO_DELETE;
		bio->bio_dev = dev;
		bio->bio_offset = off;
		bio->bio_length = MIN(len, maxlen);
		bio->bio_data = 0;
		bio->bio_done = ctl_be_block_biodone;
		bio->bio_caller1 = beio;
		bio->bio_pblkno = off / be_lun->cbe_lun.blocksize;

		off += bio->bio_length;
		len -= bio->bio_length;

		mtx_lock(&be_lun->io_lock);
		beio->num_bios_sent++;
		if (last && len == 0)
			beio->send_complete = 1;
		mtx_unlock(&be_lun->io_lock);

		if (csw) {
			csw->d_strategy(bio);
		} else {
			bio->bio_error = ENXIO;
			ctl_be_block_biodone(bio);
		}
	}
	if (csw)
		dev_relthread(dev, ref);
}

static void
ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end;
	uint64_t len;

	io = beio->io;

	DPRINTF("entered\n");

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	if (beio->io_offset == -1) {
		beio->io_len = 0;
		ptrlen = (struct ctl_ptr_len_flags *)
		    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
		buf = (struct scsi_unmap_desc *)ptrlen->ptr;
		end = buf + ptrlen->len / sizeof(*buf);
		for (; buf < end; buf++) {
			len = (uint64_t)scsi_4btoul(buf->length) *
			    be_lun->cbe_lun.blocksize;
			beio->io_len += len;
			ctl_be_block_unmap_dev_range(be_lun, beio,
			    scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize,
			    len, (end - buf < 2) ? TRUE : FALSE);
		}
	} else
		ctl_be_block_unmap_dev_range(be_lun, beio,
		    beio->io_offset, beio->io_len, TRUE);
}
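
/*
 * Handle read/write I/O for a device-backed LUN by translating the
 * beio's S/G segments into a chain of bios and pushing them through
 * the device's strategy routine.
 */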
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
    struct ctl_be_block_io *beio)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	struct bio *bio;
	struct cdevsw *csw;
	struct cdev *dev;
	off_t cur_offset;
	int i, max_iosize, ref;

	DPRINTF("entered\n");
	csw = devvn_refthread(be_lun->vn, &dev, &ref);

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.
	 */
	if (csw) {
		max_iosize = dev->si_iosize_max;
		if (max_iosize < PAGE_SIZE)
			max_iosize = DFLTPHYS;
	} else
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;
	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_dev = dev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
			beio->num_bios_sent++;
		}
	}
	beio->send_complete = 1;
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	/*
	 * Fire off all allocated requests!
	 */
	while ((bio = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, bio, bio_queue);
		if (csw)
			csw->d_strategy(bio);
		else {
			bio->bio_error = ENXIO;
			ctl_be_block_biodone(bio);
		}
	}
	if (csw)
		dev_relthread(dev, ref);
}

static uint64_t
ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
{
	struct diocgattr_arg arg;
	struct cdevsw *csw;
	struct cdev *dev;
	int error, ref;

	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	if (csw == NULL)
		return (UINT64_MAX);
	strlcpy(arg.name, attrname, sizeof(arg.name));
	arg.len = sizeof(arg.value.off);
	if (csw->d_ioctl) {
		error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
		    curthread);
	} else
		error = ENODEV;
	dev_relthread(dev, ref);
	if (error != 0)
		return (UINT64_MAX);
	return (arg.value.off);
}
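
/*
 * config-write dispatcher for SYNCHRONIZE CACHE: translate the request
 * into a flush of the backing store.
 */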
static void
ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct ctl_be_block_io *beio;
	struct ctl_lba_len_flags *lbalen;

	DPRINTF("entered\n");
	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	lbalen = (struct ctl_lba_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];

	beio->io_len = lbalen->len * cbe_lun->blocksize;
	beio->io_offset = lbalen->lba * cbe_lun->blocksize;
	beio->io_arg = (lbalen->flags & SSC_IMMED) != 0;
	beio->bio_cmd = BIO_FLUSH;
	beio->ds_trans_type = DEVSTAT_NO_DATA;
	DPRINTF("SYNC\n");
	be_lun->lun_flush(be_lun, beio);
}

static void
ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
{
	union ctl_io *io;

	io = beio->io;
	ctl_free_beio(beio);
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		ctl_config_write_done(io);
		return;
	}

	ctl_be_block_config_write(io);
}

static void
ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_block_softc *softc = be_lun->softc;
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct ctl_be_block_io *beio;
	struct ctl_lba_len_flags *lbalen;
	uint64_t len_left, lba;
	uint32_t pb, pbo, adj;
	int i, seglen;
	uint8_t *buf, *end;

	DPRINTF("entered\n");

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	lbalen = ARGS(io);

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) ||
	    (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) {
		ctl_free_beio(beio);
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) {
		beio->io_offset = lbalen->lba * cbe_lun->blocksize;
		beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize;
		beio->bio_cmd = BIO_DELETE;
		beio->ds_trans_type = DEVSTAT_FREE;

		be_lun->unmap(be_lun, beio);
		return;
	}

	beio->bio_cmd = BIO_WRITE;
	beio->ds_trans_type = DEVSTAT_WRITE;

	DPRINTF("WRITE SAME at LBA %jx len %u\n",
	    (uintmax_t)lbalen->lba, lbalen->len);

	pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp;
	if (be_lun->cbe_lun.pblockoff > 0)
		pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff;
	else
		pbo = 0;
	len_left = (uint64_t)lbalen->len * cbe_lun->blocksize;
	for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) {
		/*
		 * Setup the S/G entry for this chunk.
		 */
		seglen = MIN(CTLBLK_MAX_SEG, len_left);
		if (pb > cbe_lun->blocksize) {
			adj = ((lbalen->lba + lba) * cbe_lun->blocksize +
			    seglen - pbo) % pb;
			if (seglen > adj)
				seglen -= adj;
			else
				seglen -= seglen % cbe_lun->blocksize;
		} else
			seglen -= seglen % cbe_lun->blocksize;
		ctl_alloc_seg(softc, &beio->sg_segs[i], seglen);

		DPRINTF("segment %d addr %p len %zd\n", i,
		    beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= seglen;

		buf = beio->sg_segs[i].addr;
		end = buf + seglen;
		for (; buf < end; buf += cbe_lun->blocksize) {
			if (lbalen->flags & SWS_NDOB) {
				memset(buf, 0, cbe_lun->blocksize);
			} else {
				memcpy(buf, io->scsiio.kern_data_ptr,
				    cbe_lun->blocksize);
			}
			if (lbalen->flags & SWS_LBDATA)
				scsi_ulto4b(lbalen->lba + lba, buf);
			lba++;
		}
	}

	beio->io_offset = lbalen->lba * cbe_lun->blocksize;
	beio->io_len = lba * cbe_lun->blocksize;

	/* We cannot do it all in one run.  Correct and schedule a rerun. */
	if (len_left > 0) {
		lbalen->lba += lba;
		lbalen->len -= lba;
		beio->beio_cont = ctl_be_block_cw_done_ws;
	}

	be_lun->dispatch(be_lun, beio);
}
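
/*
 * config-write dispatcher for UNMAP: validate the request and hand the
 * descriptor list to the backend's unmap routine.
 */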
static void
ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_ptr_len_flags *ptrlen;

	DPRINTF("entered\n");

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];

	if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) {
		ctl_free_beio(beio);
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 1,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	beio->io_len = 0;
	beio->io_offset = -1;
	beio->bio_cmd = BIO_DELETE;
	beio->ds_trans_type = DEVSTAT_FREE;
	DPRINTF("UNMAP\n");
	be_lun->unmap(be_lun, beio);
}

static void
ctl_be_block_cr_done(struct ctl_be_block_io *beio)
{
	union ctl_io *io;

	io = beio->io;
	ctl_free_beio(beio);
	ctl_config_read_done(io);
}

static void
ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	beio->io = io;
	beio->lun = be_lun;
	beio->beio_cont = ctl_be_block_cr_done;
	PRIV(io)->ptr = (void *)beio;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:		/* GET LBA STATUS */
		beio->bio_cmd = -1;
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		if (be_lun->get_lba_status)
			be_lun->get_lba_status(be_lun, beio);
		else
			ctl_be_block_cr_done(beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}

static void
ctl_be_block_cw_done(struct ctl_be_block_io *beio)
{
	union ctl_io *io;

	io = beio->io;
	ctl_free_beio(beio);
	ctl_config_write_done(io);
}

static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	beio->io = io;
	beio->lun = be_lun;
	beio->beio_cont = ctl_be_block_cw_done;
	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}
	PRIV(io)->ptr = (void *)beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		ctl_be_block_cw_dispatch_sync(be_lun, io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_be_block_cw_dispatch_ws(be_lun, io);
		break;
	case UNMAP:
		ctl_be_block_cw_dispatch_unmap(be_lun, io);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}
SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t");

static void
ctl_be_block_next(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;

	io = beio->io;
	be_lun = beio->lun;
	ctl_free_beio(beio);
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		ctl_data_submit_done(io);
		return;
	}

	io->io_hdr.status &= ~CTL_STATUS_MASK;
	io->io_hdr.status |= CTL_STATUS_NONE;

	mtx_lock(&be_lun->queue_lock);
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);
	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
}

static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
    union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len_flags *lbalen;
	struct ctl_ptr_len_flags *bptrlen;
	uint64_t len_left, lbas;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	lbalen = ARGS(io);
	if (lbalen->flags & CTL_LLF_WRITE) {
		SDT_PROBE0(cbb, , write, start);
	} else {
		SDT_PROBE0(cbb, , read, start);
	}

	beio = ctl_alloc_beio(softc);
	beio->io = io;
	beio->lun = be_lun;
	bptrlen = PRIV(io);
	bptrlen->ptr = (void *)beio;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	case CTL_TAG_ACA:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	if (lbalen->flags & CTL_LLF_WRITE) {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	} else {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	}

	DPRINTF("%s at LBA %jx len %u @%ju\n",
	    (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
	    (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
	if (lbalen->flags & CTL_LLF_COMPARE) {
		beio->two_sglists = 1;
		lbas = CTLBLK_HALF_IO_SIZE;
	} else {
		lbas = CTLBLK_MAX_IO_SIZE;
	}
	lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize);
	beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize;
	beio->io_len = lbas * cbe_lun->blocksize;
	bptrlen->len += lbas;

	for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
		KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
		    i, CTLBLK_MAX_SEGS));

		/*
		 * Setup the S/G entry for this chunk.
		 */
		ctl_alloc_seg(softc, &beio->sg_segs[i],
		    MIN(CTLBLK_MAX_SEG, len_left));

		DPRINTF("segment %d addr %p len %zd\n", i,
		    beio->sg_segs[i].addr, beio->sg_segs[i].len);

		/* Set up second segment for compare operation. */
		if (beio->two_sglists) {
			ctl_alloc_seg(softc,
			    &beio->sg_segs[i + CTLBLK_HALF_SEGS],
			    beio->sg_segs[i].len);
		}

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}
	if (bptrlen->len < lbalen->len)
		beio->beio_cont = ctl_be_block_next;
	io->scsiio.be_move_done = ctl_be_block_move_done;
	/* For compare we have separate S/G lists for read and datamove. */
	if (beio->two_sglists)
		io->scsiio.kern_data_ptr =
		    (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
	else
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
	io->scsiio.kern_data_len = beio->io_len;
	io->scsiio.kern_sg_entries = beio->num_segs;
	io->scsiio.kern_data_ref = ctl_refcnt_beio;
	io->scsiio.kern_data_arg = beio;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE0(cbb, , read, alloc_done);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE0(cbb, , write, alloc_done);
		ctl_datamove(io);
	}
}
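
/*
 * Per-LUN taskqueue worker: drains the datamove, config-write,
 * config-read and input queues, in that order of priority.
 */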
static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context;
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	union ctl_io *io;
	struct ctl_be_block_io *beio;

	DPRINTF("entered\n");
	/*
	 * Fetch and process I/Os from all queues.  If we detect that the
	 * LUN is in CTL_LUN_FLAG_NO_MEDIA state here, it is the result of
	 * a race, so make the response maximally opaque so as not to
	 * confuse the initiator.
	 */
	for (;;) {
		mtx_lock(&be_lun->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			DPRINTF("datamove queue\n");
			STAILQ_REMOVE_HEAD(&be_lun->datamove_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
				ctl_set_busy(&io->scsiio);
				ctl_complete_beio(beio);
				continue;
			}
			be_lun->dispatch(be_lun, beio);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {
			DPRINTF("config write queue\n");
			STAILQ_REMOVE_HEAD(&be_lun->config_write_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
				ctl_set_busy(&io->scsiio);
				ctl_config_write_done(io);
				continue;
			}
			ctl_be_block_cw_dispatch(be_lun, io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue);
		if (io != NULL) {
			DPRINTF("config read queue\n");
			STAILQ_REMOVE_HEAD(&be_lun->config_read_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
				ctl_set_busy(&io->scsiio);
				ctl_config_read_done(io);
				continue;
			}
			ctl_be_block_cr_dispatch(be_lun, io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");
			STAILQ_REMOVE_HEAD(&be_lun->input_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
				ctl_set_busy(&io->scsiio);
				ctl_data_submit_done(io);
				continue;
			}
			ctl_be_block_dispatch(be_lun, io);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		mtx_unlock(&be_lun->queue_lock);
		break;
	}
}

/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;

	DPRINTF("entered\n");

	be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);

	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI,
	    ("%s: unexpected I/O type %x", __func__, io->io_hdr.io_type));

	PRIV(io)->len = 0;

	mtx_lock(&be_lun->queue_lock);
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);
	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (CTL_RETVAL_COMPLETE);
}
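
/*
 * ioctl handler for the backend: LUN create, remove and modify requests
 * arrive here from userland via the CTL character device.
 */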
static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc = &backend_block_softc;
	int error;

	error = 0;
	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "invalid LUN request type %d",
			    lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}

static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	const char *value;
	struct vattr vattr;
	off_t ps, pss, po, pos, us, uss, uo, uos;
	int error;

	cbe_lun = &be_lun->cbe_lun;
	file_data = &be_lun->backend.file;
	params = &be_lun->params;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;
	be_lun->get_lba_status = ctl_be_block_gls_file;
	be_lun->getattr = ctl_be_block_getattr_file;
	be_lun->unmap = NULL;
	cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "error calling VOP_GETATTR() for file %s",
		    be_lun->dev_path);
		return (error);
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;

	/*
	 * For files we can use any logical block size.  Prefer 512 bytes
	 * for compatibility reasons.  If the file's vattr.va_blocksize
	 * (preferred I/O block size) is bigger than and a multiple of the
	 * chosen logical block size, report it as the physical block size.
	 */
	if (params->blocksize_bytes != 0)
		cbe_lun->blocksize = params->blocksize_bytes;
	else if (cbe_lun->lun_type == T_CDROM)
		cbe_lun->blocksize = 2048;
	else
		cbe_lun->blocksize = 512;
	be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
	cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
	    0 : (be_lun->size_blocks - 1);

	us = ps = vattr.va_blocksize;
	uo = po = 0;

	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL)
		ctl_expand_number(value, &ps);
	value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL);
	if (value != NULL)
		ctl_expand_number(value, &po);
	pss = ps / cbe_lun->blocksize;
	pos = po / cbe_lun->blocksize;
	if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
	    ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
		cbe_lun->pblockexp = fls(pss) - 1;
		cbe_lun->pblockoff = (pss - pos) % pss;
	}

	value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL);
	if (value != NULL)
		ctl_expand_number(value, &us);
	value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL);
	if (value != NULL)
		ctl_expand_number(value, &uo);
	uss = us / cbe_lun->blocksize;
	uos = uo / cbe_lun->blocksize;
	if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
	    ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
		cbe_lun->ublockexp = fls(uss) - 1;
		cbe_lun->ublockoff = (uss - uos) % uss;
	}

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < cbe_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
		    "file %s size %ju < block size %u", be_lun->dev_path,
		    (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize);
	}

	cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize;
	return (error);
}
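
/*
 * Open-time setup for a device-backed LUN: select zvol or generic
 * device dispatch routines and probe sector size and media size with
 * the disk ioctls.
 */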
	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < cbe_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize);
	}

	cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize;
	return (error);
}

static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct ctl_lun_create_params *params;
	struct cdevsw *csw;
	struct cdev *dev;
	const char *value;
	int error, atomic, maxio, ref, unmap, tmp;
	off_t ps, pss, po, pos, us, uss, uo, uos, otmp;

	params = &be_lun->params;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	csw = devvn_refthread(be_lun->vn, &dev, &ref);
	if (csw == NULL)
		return (ENXIO);
	if (strcmp(csw->d_name, "zvol") == 0) {
		be_lun->dispatch = ctl_be_block_dispatch_zvol;
		be_lun->get_lba_status = ctl_be_block_gls_zvol;
		atomic = maxio = CTLBLK_MAX_IO_SIZE;
	} else {
		be_lun->dispatch = ctl_be_block_dispatch_dev;
		be_lun->get_lba_status = NULL;
		atomic = 0;
		maxio = dev->si_iosize_max;
		if (maxio <= 0)
			maxio = DFLTPHYS;
		if (maxio > CTLBLK_MAX_SEG)
			maxio = CTLBLK_MAX_SEG;
	}
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->getattr = ctl_be_block_getattr_dev;
	be_lun->unmap = ctl_be_block_unmap_dev;

	if (!csw->d_ioctl) {
		dev_relthread(dev, ref);
		snprintf(req->error_str, sizeof(req->error_str),
			 "no d_ioctl for device %s!", be_lun->dev_path);
		return (ENODEV);
	}

	error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD,
	    curthread);
	if (error) {
		dev_relthread(dev, ref);
		snprintf(req->error_str, sizeof(req->error_str),
			 "error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0) &&
	    (params->blocksize_bytes >= tmp)) {
		if (params->blocksize_bytes % tmp == 0) {
			cbe_lun->blocksize = params->blocksize_bytes;
		} else {
			dev_relthread(dev, ref);
			snprintf(req->error_str, sizeof(req->error_str),
				 "requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 params->blocksize_bytes, tmp);
			return (EINVAL);
		}
	} else if (params->blocksize_bytes != 0) {
		dev_relthread(dev, ref);
		snprintf(req->error_str, sizeof(req->error_str),
			 "requested blocksize %u < backing device "
			 "blocksize %u", params->blocksize_bytes, tmp);
		return (EINVAL);
	} else if (cbe_lun->lun_type == T_CDROM)
		cbe_lun->blocksize = MAX(tmp, 2048);
	else
		cbe_lun->blocksize = tmp;
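	/*
	 * Illustrative example of the checks above: on a device reporting
	 * 4096-byte sectors, a requested blocksize of 8192 is accepted
	 * (an even multiple), 512 is rejected as smaller than the backing
	 * store's sector size, and 6144 is rejected as a non-multiple.
	 * With no explicit request, the device's own sector size is used.
	 */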
	error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD,
	    curthread);
	if (error) {
		dev_relthread(dev, ref);
		snprintf(req->error_str, sizeof(req->error_str),
			 "error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > otmp) {
			dev_relthread(dev, ref);
			snprintf(req->error_str, sizeof(req->error_str),
				 "requested LUN size %ju > backing device "
				 "size %ju",
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)otmp);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else
		be_lun->size_bytes = otmp;
	be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
	cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
	    0 : (be_lun->size_blocks - 1);

	error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD,
	    curthread);
	if (error)
		ps = po = 0;
	else {
		error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po,
		    FREAD, curthread);
		if (error)
			po = 0;
	}
	us = ps;
	uo = po;

	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL)
		ctl_expand_number(value, &ps);
	value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL);
	if (value != NULL)
		ctl_expand_number(value, &po);
	pss = ps / cbe_lun->blocksize;
	pos = po / cbe_lun->blocksize;
	if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
	    ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
		cbe_lun->pblockexp = fls(pss) - 1;
		cbe_lun->pblockoff = (pss - pos) % pss;
	}

	value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL);
	if (value != NULL)
		ctl_expand_number(value, &us);
	value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL);
	if (value != NULL)
		ctl_expand_number(value, &uo);
	uss = us / cbe_lun->blocksize;
	uos = uo / cbe_lun->blocksize;
	if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
	    ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
		cbe_lun->ublockexp = fls(uss) - 1;
		cbe_lun->ublockoff = (uss - uos) % uss;
	}

	cbe_lun->atomicblock = atomic / cbe_lun->blocksize;
	cbe_lun->opttxferlen = maxio / cbe_lun->blocksize;

	if (be_lun->dispatch == ctl_be_block_dispatch_zvol) {
		unmap = 1;
	} else {
		struct diocgattr_arg arg;

		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
		arg.len = sizeof(arg.value.i);
		error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
		    curthread);
		unmap = (error == 0) ? arg.value.i : 0;
	}
	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value != NULL)
		unmap = (strcmp(value, "on") == 0);
	if (unmap)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;

	dev_relthread(dev, ref);
	return (0);
}
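/*
 * Illustrative note: UNMAP support is auto-detected above (zvols always
 * support it; other GEOM providers are probed through the
 * GEOM::candelete attribute), but it can be forced either way at
 * creation time, e.g. "ctladm create -b block -o file=/dev/ada0
 * -o unmap=on".  The command line shown follows ctladm(8) usage; only
 * the "unmap" nvlist key is consumed here.
 */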
static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	int flags;

	if (be_lun->vn) {
		flags = FREAD;
		if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0)
			flags |= FWRITE;
		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type %d", be_lun->dev_type);
			break;
		}
		be_lun->dev_type = CTL_BE_BLOCK_NONE;
	}
	return (0);
}

static int
ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
	struct nameidata nd;
	const char *value;
	int error, flags;

	error = 0;
	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "Root filesystem is not mounted");
		return (1);
	}
	pwd_ensure_dirs();

	value = dnvlist_get_string(cbe_lun->options, "file", NULL);
	if (value == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "no file argument specified");
		return (1);
	}
	free(be_lun->dev_path, M_CTLBLK);
	be_lun->dev_path = strdup(value, M_CTLBLK);

	flags = FREAD;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") != 0)
			flags |= FWRITE;
	} else if (cbe_lun->lun_type == T_DIRECT)
		flags |= FWRITE;

again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if ((error == EROFS || error == EACCES) && (flags & FWRITE)) {
		flags &= ~FWRITE;
		goto again;
	}
	if (error) {
		/*
		 * If the user didn't give us a fully qualified path,
		 * prepending /dev/ is the only reasonable guess we can
		 * make.  To open a plain file, the full path must be
		 * specified.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_name;

			asprintf(&dev_name, M_CTLBLK, "/dev/%s",
				 be_lun->dev_path);
			free(be_lun->dev_path, M_CTLBLK);
			be_lun->dev_path = dev_name;
			goto again;
		}
		snprintf(req->error_str, sizeof(req->error_str),
		    "error opening %s: %d", be_lun->dev_path, error);
		return (error);
	}
	if (flags & FWRITE)
		cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY;
	else
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;

	NDFREE(&nd, NDF_ONLY_PNBUF);
	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk_error(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or plain file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn);

	if (error != 0)
		ctl_be_block_close(be_lun);
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	if (be_lun->dispatch != ctl_be_block_dispatch_dev)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "soft") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	return (0);
}
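/*
 * Illustrative note: the "serseq" option handled at the end of
 * ctl_be_block_open() accepts the literal strings compared above, e.g.
 * "ctladm create -b block -o file=/dev/md0 -o serseq=read".  Absent the
 * option, raw devices run with sequence serialization off, while files
 * and zvols (anything not using ctl_be_block_dispatch_dev) default to
 * the "soft" setting.
 */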
static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	char tmpstr[32];
	const char *value;
	int retval, num_threads;
	int tmp_num_threads;

	params = &req->reqdata.create;
	retval = 0;
	req->status = CTL_LUN_OK;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_read_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	mtx_init(&be_lun->io_lock, "ctlblock io", NULL, MTX_DEF);
	mtx_init(&be_lun->queue_lock, "ctlblock queue", NULL, MTX_DEF);
	cbe_lun->options = nvlist_clone(req->args_nvl);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		be_lun->size_bytes = params->lun_size_bytes;
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
		cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
		    0 : (be_lun->size_blocks - 1);

		if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
		    control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
			retval = ctl_be_block_open(be_lun, req);
			if (retval != 0) {
				retval = 0;
				req->status = CTL_LUN_WARNING;
			}
		}
		num_threads = cbb_num_threads;
	} else {
		num_threads = 1;
	}

	value = dnvlist_get_string(cbe_lun->options, "num_threads", NULL);
	if (value != NULL) {
		tmp_num_threads = strtol(value, NULL, 0);

		/*
		 * We don't let the user specify less than one thread, but
		 * we trust them not to ask for an absurd number of threads.
		 */
		if (tmp_num_threads < 1) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "invalid number of threads %s",
				 value);
			goto bailout_error;
		}
		num_threads = tmp_num_threads;
	}
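	/*
	 * Illustrative example: the per-LUN thread count can be
	 * overridden at creation time, e.g. "ctladm create -b block
	 * -o file=/dev/zvol/tank/vol0 -o num_threads=8".  Otherwise the
	 * default is the cbb_num_threads tunable/sysctl for disk-like
	 * LUNs, and a single thread for everything else.
	 */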
	if (be_lun->vn == NULL)
		cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
	/* Tell the user the blocksize we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;
	params->blocksize_bytes = cbe_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown;
	cbe_lun->be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}
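	/*
	 * Illustrative example: the first LUN created without an explicit
	 * serial number or device ID ends up with "MYSERIAL0000" and
	 * "MYDEVID0000", since softc->num_luns is only incremented once
	 * the LUN has been fully registered below.
	 */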
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlblocktq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "unable to create taskqueue");
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
	    /*num threads*/num_threads,
	    /*priority*/PUSER,
	    /*proc*/control_softc->ctl_proc,
	    /*thread name*/"block");

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "ctl_add_lun() returned error %d, see dmesg for "
			 "details", retval);
		retval = 0;
		goto bailout_error;
	}

	be_lun->disk_stats = devstat_new_entry("cbb", cbe_lun->lun_id,
					       cbe_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       cbe_lun->lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	if (be_lun->io_taskqueue != NULL)
		taskqueue_free(be_lun->io_taskqueue);
	ctl_be_block_close(be_lun);
	if (be_lun->dev_path != NULL)
		free(be_lun->dev_path, M_CTLBLK);
	nvlist_destroy(cbe_lun->options);
	mtx_destroy(&be_lun->queue_lock);
	mtx_destroy(&be_lun->io_lock);
	free(be_lun, M_CTLBLK);

	return (retval);
}

static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	int retval;

	params = &req->reqdata.rm;

	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_block_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "LUN %u is not managed by the block backend",
			 params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (be_lun->vn != NULL) {
		cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
		ctl_lun_no_media(cbe_lun);
		taskqueue_drain_all(be_lun->io_taskqueue);
		ctl_be_block_close(be_lun);
	}

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error %d returned from ctl_remove_lun() for "
			 "LUN %d", retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblockrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_CTLBLK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}
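/*
 * Illustrative sketch: removal is normally driven from userland, e.g.
 * "ctladm remove -b block -l 3" arrives as a CTL_LUNREQ_RM request with
 * reqdata.rm.lun_id = 3.  Note the handshake above: the caller sleeps
 * until ctl_be_block_lun_shutdown() marks the LUN UNCONFIGURED and
 * issues a wakeup, so the be_lun is only freed after CTL has let go
 * of it.
 */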
static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	const char *value;
	uint64_t oldsize;
	int error, wasprim;

	params = &req->reqdata.modify;

	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "LUN %u is not managed by the block backend",
			 params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	oldsize = be_lun->size_blocks;
	if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
	    control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
		if (be_lun->vn == NULL)
			error = ctl_be_block_open(be_lun, req);
		else if (vn_isdisk_error(be_lun->vn, &error))
			error = ctl_be_block_open_dev(be_lun, req);
		else if (be_lun->vn->v_type == VREG) {
			vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
			error = ctl_be_block_open_file(be_lun, req);
			VOP_UNLOCK(be_lun->vn);
		} else
			error = EINVAL;
		if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) &&
		    be_lun->vn != NULL) {
			cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
			ctl_lun_has_media(cbe_lun);
		} else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 &&
		    be_lun->vn == NULL) {
			cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
			ctl_lun_no_media(cbe_lun);
		}
		cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
	} else {
		if (be_lun->vn != NULL) {
			cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
			ctl_lun_no_media(cbe_lun);
			taskqueue_drain_all(be_lun->io_taskqueue);
			error = ctl_be_block_close(be_lun);
		} else
			error = 0;
	}
	if (be_lun->size_blocks != oldsize)
		ctl_lun_capacity_changed(cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}
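/*
 * Illustrative sketch: resizing is the common modify operation, e.g.
 * "ctladm modify -b block -l 3 -s 4294967296" (flags per ctladm(8))
 * stores the new size in be_lun->params and re-runs the open path
 * above, which re-reads the backing store geometry;
 * ctl_lun_capacity_changed() then reports the new capacity to
 * initiators.  Only reqdata.modify and args_nvl are consumed here.
 */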
static void
ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)cbe_lun;
	struct ctl_be_block_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->dev_path, M_CTLBLK);
	mtx_destroy(&be_lun->queue_lock);
	mtx_destroy(&be_lun->io_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_CTLBLK);
	mtx_unlock(&softc->lock);
}

static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	int retval;

	DPRINTF("entered\n");

	cbe_lun = CTL_BACKEND_LUN(io);
	be_lun = (struct ctl_be_block_lun *)cbe_lun;

	retval = 0;
	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->queue_lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
		    links);
		mtx_unlock(&be_lun->queue_lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_lun_req req;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) {
				retval = ctl_be_block_open(be_lun, &req);
				cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
				if (retval == 0) {
					cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
					ctl_lun_has_media(cbe_lun);
				} else {
					cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
					ctl_lun_no_media(cbe_lun);
				}
			}
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ) {
				cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
				cbe_lun->flags |= CTL_LUN_FLAG_EJECTED;
				ctl_lun_ejected(cbe_lun);
				if (be_lun->vn != NULL)
					ctl_be_block_close(be_lun);
			}
		}

		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
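/*
 * Illustrative note: config reads follow the same split as the config
 * writes above.  The one command that must touch the backing store,
 * GET LBA STATUS (a SERVICE ACTION IN variant, matched below via
 * SGLS_SERVICE_ACTION), is queued to the worker thread and returns
 * CTL_RETVAL_QUEUED; everything else is completed inline.
 */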
static int
ctl_be_block_config_read(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	int retval = 0;

	DPRINTF("entered\n");

	be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->config_read_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			retval = CTL_RETVAL_QUEUED;
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun;
	int retval;

	retval = sbuf_printf(sb, "\t<num_threads>");
	if (retval != 0)
		goto bailout;
	retval = sbuf_printf(sb, "%d", lun->num_threads);
	if (retval != 0)
		goto bailout;
	retval = sbuf_printf(sb, "</num_threads>\n");

bailout:
	return (retval);
}

static uint64_t
ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun;

	if (lun->getattr == NULL)
		return (UINT64_MAX);
	return (lun->getattr(lun, attrname));
}

static int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc = &backend_block_softc;

	sx_init(&softc->modify_lock, "ctlblock modify");
	mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
	softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG,
	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0);
	if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
		softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG,
		    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/ 0);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_be_block_shutdown(void)
{
	struct ctl_be_block_softc *softc = &backend_block_softc;
	struct ctl_be_block_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	uma_zdestroy(softc->bufmin_zone);
	if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
		uma_zdestroy(softc->bufmax_zone);
	uma_zdestroy(softc->beio_zone);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}
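/*
 * Illustrative example: ctl_be_block_lun_info() above contributes
 * backend-specific fields to CTL's XML LUN list, so a LUN running 14
 * worker threads shows up as "<num_threads>14</num_threads>" in e.g.
 * the output of "ctladm devlist -x" (the sbuf is consumed by CTL's
 * LUN-list ioctl, not by this file).
 */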