1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003 Silicon Graphics International Corp. 5 * Copyright (c) 2009-2011 Spectra Logic Corporation 6 * Copyright (c) 2012 The FreeBSD Foundation 7 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org> 8 * All rights reserved. 9 * 10 * Portions of this software were developed by Edward Tomasz Napierala 11 * under sponsorship from the FreeBSD Foundation. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions, and the following disclaimer, 18 * without modification. 19 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 20 * substantially similar to the "NO WARRANTY" disclaimer below 21 * ("Disclaimer") and any redistribution must be conditioned upon 22 * including a substantially similar Disclaimer requirement for further 23 * binary redistribution. 24 * 25 * NO WARRANTY 26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 27 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 28 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 29 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 30 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 34 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 35 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGES. 37 * 38 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 39 */ 40 /* 41 * CAM Target Layer driver backend for block devices. 42 * 43 * Author: Ken Merry <ken@FreeBSD.org> 44 */ 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 #include <sys/param.h> 49 #include <sys/systm.h> 50 #include <sys/kernel.h> 51 #include <sys/types.h> 52 #include <sys/kthread.h> 53 #include <sys/bio.h> 54 #include <sys/fcntl.h> 55 #include <sys/limits.h> 56 #include <sys/lock.h> 57 #include <sys/mutex.h> 58 #include <sys/condvar.h> 59 #include <sys/malloc.h> 60 #include <sys/conf.h> 61 #include <sys/ioccom.h> 62 #include <sys/queue.h> 63 #include <sys/sbuf.h> 64 #include <sys/endian.h> 65 #include <sys/uio.h> 66 #include <sys/buf.h> 67 #include <sys/taskqueue.h> 68 #include <sys/vnode.h> 69 #include <sys/namei.h> 70 #include <sys/mount.h> 71 #include <sys/disk.h> 72 #include <sys/fcntl.h> 73 #include <sys/filedesc.h> 74 #include <sys/filio.h> 75 #include <sys/proc.h> 76 #include <sys/pcpu.h> 77 #include <sys/module.h> 78 #include <sys/sdt.h> 79 #include <sys/devicestat.h> 80 #include <sys/sysctl.h> 81 #include <sys/nv.h> 82 #include <sys/dnv.h> 83 #include <sys/sx.h> 84 85 #include <geom/geom.h> 86 87 #include <cam/cam.h> 88 #include <cam/scsi/scsi_all.h> 89 #include <cam/scsi/scsi_da.h> 90 #include <cam/ctl/ctl_io.h> 91 #include <cam/ctl/ctl.h> 92 #include <cam/ctl/ctl_backend.h> 93 #include <cam/ctl/ctl_ioctl.h> 94 #include <cam/ctl/ctl_ha.h> 95 #include <cam/ctl/ctl_scsi_all.h> 96 #include <cam/ctl/ctl_private.h> 97 #include <cam/ctl/ctl_error.h> 98 99 /* 100 * The idea here is that we'll allocate enough S/G space to hold a 1MB 101 * I/O. If we get an I/O larger than that, we'll split it. 102 */ 103 #define CTLBLK_HALF_IO_SIZE (512 * 1024) 104 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 105 #define CTLBLK_MAX_SEG MAXPHYS 106 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 107 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 108 109 #ifdef CTLBLK_DEBUG 110 #define DPRINTF(fmt, args...) \ 111 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 112 #else 113 #define DPRINTF(fmt, args...) do {} while(0) 114 #endif 115 116 #define PRIV(io) \ 117 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 118 #define ARGS(io) \ 119 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 120 121 SDT_PROVIDER_DEFINE(cbb); 122 123 typedef enum { 124 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 125 CTL_BE_BLOCK_LUN_WAITING = 0x04, 126 } ctl_be_block_lun_flags; 127 128 typedef enum { 129 CTL_BE_BLOCK_NONE, 130 CTL_BE_BLOCK_DEV, 131 CTL_BE_BLOCK_FILE 132 } ctl_be_block_type; 133 134 struct ctl_be_block_filedata { 135 struct ucred *cred; 136 }; 137 138 union ctl_be_block_bedata { 139 struct ctl_be_block_filedata file; 140 }; 141 142 struct ctl_be_block_io; 143 struct ctl_be_block_lun; 144 145 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 146 struct ctl_be_block_io *beio); 147 typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun, 148 const char *attrname); 149 150 /* 151 * Backend LUN structure. There is a 1:1 mapping between a block device 152 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 153 */ 154 struct ctl_be_block_lun { 155 struct ctl_be_lun cbe_lun; /* Must be first element. */ 156 struct ctl_lun_create_params params; 157 char *dev_path; 158 ctl_be_block_type dev_type; 159 struct vnode *vn; 160 union ctl_be_block_bedata backend; 161 cbb_dispatch_t dispatch; 162 cbb_dispatch_t lun_flush; 163 cbb_dispatch_t unmap; 164 cbb_dispatch_t get_lba_status; 165 cbb_getattr_t getattr; 166 uint64_t size_blocks; 167 uint64_t size_bytes; 168 struct ctl_be_block_softc *softc; 169 struct devstat *disk_stats; 170 ctl_be_block_lun_flags flags; 171 SLIST_ENTRY(ctl_be_block_lun) links; 172 struct taskqueue *io_taskqueue; 173 struct task io_task; 174 int num_threads; 175 STAILQ_HEAD(, ctl_io_hdr) input_queue; 176 STAILQ_HEAD(, ctl_io_hdr) config_read_queue; 177 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 178 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 179 struct mtx_padalign io_lock; 180 struct mtx_padalign queue_lock; 181 }; 182 183 /* 184 * Overall softc structure for the block backend module. 185 */ 186 struct ctl_be_block_softc { 187 struct sx modify_lock; 188 struct mtx lock; 189 int num_luns; 190 SLIST_HEAD(, ctl_be_block_lun) lun_list; 191 uma_zone_t beio_zone; 192 uma_zone_t buf_zone; 193 }; 194 195 static struct ctl_be_block_softc backend_block_softc; 196 197 /* 198 * Per-I/O information. 199 */ 200 struct ctl_be_block_io { 201 union ctl_io *io; 202 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 203 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 204 int refcnt; 205 int bio_cmd; 206 int two_sglists; 207 int num_segs; 208 int num_bios_sent; 209 int num_bios_done; 210 int send_complete; 211 int first_error; 212 uint64_t first_error_offset; 213 struct bintime ds_t0; 214 devstat_tag_type ds_tag_type; 215 devstat_trans_flags ds_trans_type; 216 uint64_t io_len; 217 uint64_t io_offset; 218 int io_arg; 219 struct ctl_be_block_softc *softc; 220 struct ctl_be_block_lun *lun; 221 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 222 }; 223 224 extern struct ctl_softc *control_softc; 225 226 static int cbb_num_threads = 14; 227 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 228 "CAM Target Layer Block Backend"); 229 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, 230 &cbb_num_threads, 0, "Number of threads per backing file"); 231 232 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 233 static void ctl_free_beio(struct ctl_be_block_io *beio); 234 static void ctl_complete_beio(struct ctl_be_block_io *beio); 235 static int ctl_be_block_move_done(union ctl_io *io); 236 static void ctl_be_block_biodone(struct bio *bio); 237 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 238 struct ctl_be_block_io *beio); 239 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241 static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243 static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, 244 const char *attrname); 245 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 246 struct ctl_be_block_io *beio); 247 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 248 struct ctl_be_block_io *beio); 249 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 250 struct ctl_be_block_io *beio); 251 static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, 252 const char *attrname); 253 static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, 254 union ctl_io *io); 255 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 256 union ctl_io *io); 257 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 258 union ctl_io *io); 259 static void ctl_be_block_worker(void *context, int pending); 260 static int ctl_be_block_submit(union ctl_io *io); 261 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 262 int flag, struct thread *td); 263 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 264 struct ctl_lun_req *req); 265 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 266 struct ctl_lun_req *req); 267 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 268 static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, 269 struct ctl_lun_req *req); 270 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 271 struct ctl_lun_req *req); 272 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 273 struct ctl_lun_req *req); 274 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 275 struct ctl_lun_req *req); 276 static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun); 277 static int ctl_be_block_config_write(union ctl_io *io); 278 static int ctl_be_block_config_read(union ctl_io *io); 279 static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb); 280 static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname); 281 static int ctl_be_block_init(void); 282 static int ctl_be_block_shutdown(void); 283 284 static struct ctl_backend_driver ctl_be_block_driver = 285 { 286 .name = "block", 287 .flags = CTL_BE_FLAG_HAS_CONFIG, 288 .init = ctl_be_block_init, 289 .shutdown = ctl_be_block_shutdown, 290 .data_submit = ctl_be_block_submit, 291 .data_move_done = ctl_be_block_move_done, 292 .config_read = ctl_be_block_config_read, 293 .config_write = ctl_be_block_config_write, 294 .ioctl = ctl_be_block_ioctl, 295 .lun_info = ctl_be_block_lun_info, 296 .lun_attr = ctl_be_block_lun_attr 297 }; 298 299 MALLOC_DEFINE(M_CTLBLK, "ctlblock", "Memory used for CTL block backend"); 300 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 301 302 static struct ctl_be_block_io * 303 ctl_alloc_beio(struct ctl_be_block_softc *softc) 304 { 305 struct ctl_be_block_io *beio; 306 307 beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO); 308 beio->softc = softc; 309 beio->refcnt = 1; 310 return (beio); 311 } 312 313 static void 314 ctl_real_free_beio(struct ctl_be_block_io *beio) 315 { 316 struct ctl_be_block_softc *softc = beio->softc; 317 int i; 318 319 for (i = 0; i < beio->num_segs; i++) { 320 uma_zfree(softc->buf_zone, beio->sg_segs[i].addr); 321 322 /* For compare we had two equal S/G lists. */ 323 if (beio->two_sglists) { 324 uma_zfree(softc->buf_zone, 325 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 326 } 327 } 328 329 uma_zfree(softc->beio_zone, beio); 330 } 331 332 static void 333 ctl_refcnt_beio(void *arg, int diff) 334 { 335 struct ctl_be_block_io *beio = arg; 336 337 if (atomic_fetchadd_int(&beio->refcnt, diff) + diff == 0) 338 ctl_real_free_beio(beio); 339 } 340 341 static void 342 ctl_free_beio(struct ctl_be_block_io *beio) 343 { 344 345 ctl_refcnt_beio(beio, -1); 346 } 347 348 static void 349 ctl_complete_beio(struct ctl_be_block_io *beio) 350 { 351 union ctl_io *io = beio->io; 352 353 if (beio->beio_cont != NULL) { 354 beio->beio_cont(beio); 355 } else { 356 ctl_free_beio(beio); 357 ctl_data_submit_done(io); 358 } 359 } 360 361 static size_t 362 cmp(uint8_t *a, uint8_t *b, size_t size) 363 { 364 size_t i; 365 366 for (i = 0; i < size; i++) { 367 if (a[i] != b[i]) 368 break; 369 } 370 return (i); 371 } 372 373 static void 374 ctl_be_block_compare(union ctl_io *io) 375 { 376 struct ctl_be_block_io *beio; 377 uint64_t off, res; 378 int i; 379 uint8_t info[8]; 380 381 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 382 off = 0; 383 for (i = 0; i < beio->num_segs; i++) { 384 res = cmp(beio->sg_segs[i].addr, 385 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 386 beio->sg_segs[i].len); 387 off += res; 388 if (res < beio->sg_segs[i].len) 389 break; 390 } 391 if (i < beio->num_segs) { 392 scsi_u64to8b(off, info); 393 ctl_set_sense(&io->scsiio, /*current_error*/ 1, 394 /*sense_key*/ SSD_KEY_MISCOMPARE, 395 /*asc*/ 0x1D, /*ascq*/ 0x00, 396 /*type*/ SSD_ELEM_INFO, 397 /*size*/ sizeof(info), /*data*/ &info, 398 /*type*/ SSD_ELEM_NONE); 399 } else 400 ctl_set_success(&io->scsiio); 401 } 402 403 static int 404 ctl_be_block_move_done(union ctl_io *io) 405 { 406 struct ctl_be_block_io *beio; 407 struct ctl_be_block_lun *be_lun; 408 struct ctl_lba_len_flags *lbalen; 409 #ifdef CTL_TIME_IO 410 struct bintime cur_bt; 411 #endif 412 413 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 414 be_lun = beio->lun; 415 416 DPRINTF("entered\n"); 417 418 #ifdef CTL_TIME_IO 419 getbinuptime(&cur_bt); 420 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 421 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 422 #endif 423 io->io_hdr.num_dmas++; 424 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 425 426 /* 427 * We set status at this point for read commands, and write 428 * commands with errors. 429 */ 430 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 431 ; 432 } else if ((io->io_hdr.port_status != 0) && 433 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 434 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 435 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 436 /*retry_count*/ io->io_hdr.port_status); 437 } else if (io->scsiio.kern_data_resid != 0 && 438 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 439 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 440 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 441 ctl_set_invalid_field_ciu(&io->scsiio); 442 } else if ((io->io_hdr.port_status == 0) && 443 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 444 lbalen = ARGS(beio->io); 445 if (lbalen->flags & CTL_LLF_READ) { 446 ctl_set_success(&io->scsiio); 447 } else if (lbalen->flags & CTL_LLF_COMPARE) { 448 /* We have two data blocks ready for comparison. */ 449 ctl_be_block_compare(io); 450 } 451 } 452 453 /* 454 * If this is a read, or a write with errors, it is done. 455 */ 456 if ((beio->bio_cmd == BIO_READ) 457 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 458 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 459 ctl_complete_beio(beio); 460 return (0); 461 } 462 463 /* 464 * At this point, we have a write and the DMA completed 465 * successfully. We now have to queue it to the task queue to 466 * execute the backend I/O. That is because we do blocking 467 * memory allocations, and in the file backing case, blocking I/O. 468 * This move done routine is generally called in the SIM's 469 * interrupt context, and therefore we cannot block. 470 */ 471 mtx_lock(&be_lun->queue_lock); 472 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 473 mtx_unlock(&be_lun->queue_lock); 474 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 475 476 return (0); 477 } 478 479 static void 480 ctl_be_block_biodone(struct bio *bio) 481 { 482 struct ctl_be_block_io *beio; 483 struct ctl_be_block_lun *be_lun; 484 union ctl_io *io; 485 int error; 486 487 beio = bio->bio_caller1; 488 be_lun = beio->lun; 489 io = beio->io; 490 491 DPRINTF("entered\n"); 492 493 error = bio->bio_error; 494 mtx_lock(&be_lun->io_lock); 495 if (error != 0 && 496 (beio->first_error == 0 || 497 bio->bio_offset < beio->first_error_offset)) { 498 beio->first_error = error; 499 beio->first_error_offset = bio->bio_offset; 500 } 501 502 beio->num_bios_done++; 503 504 /* 505 * XXX KDM will this cause WITNESS to complain? Holding a lock 506 * during the free might cause it to complain. 507 */ 508 g_destroy_bio(bio); 509 510 /* 511 * If the send complete bit isn't set, or we aren't the last I/O to 512 * complete, then we're done. 513 */ 514 if ((beio->send_complete == 0) 515 || (beio->num_bios_done < beio->num_bios_sent)) { 516 mtx_unlock(&be_lun->io_lock); 517 return; 518 } 519 520 /* 521 * At this point, we've verified that we are the last I/O to 522 * complete, so it's safe to drop the lock. 523 */ 524 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 525 beio->ds_tag_type, beio->ds_trans_type, 526 /*now*/ NULL, /*then*/&beio->ds_t0); 527 mtx_unlock(&be_lun->io_lock); 528 529 /* 530 * If there are any errors from the backing device, we fail the 531 * entire I/O with a medium error. 532 */ 533 error = beio->first_error; 534 if (error != 0) { 535 if (error == EOPNOTSUPP) { 536 ctl_set_invalid_opcode(&io->scsiio); 537 } else if (error == ENOSPC || error == EDQUOT) { 538 ctl_set_space_alloc_fail(&io->scsiio); 539 } else if (error == EROFS || error == EACCES) { 540 ctl_set_hw_write_protected(&io->scsiio); 541 } else if (beio->bio_cmd == BIO_FLUSH) { 542 /* XXX KDM is there is a better error here? */ 543 ctl_set_internal_failure(&io->scsiio, 544 /*sks_valid*/ 1, 545 /*retry_count*/ 0xbad2); 546 } else { 547 ctl_set_medium_error(&io->scsiio, 548 beio->bio_cmd == BIO_READ); 549 } 550 ctl_complete_beio(beio); 551 return; 552 } 553 554 /* 555 * If this is a write, a flush, a delete or verify, we're all done. 556 * If this is a read, we can now send the data to the user. 557 */ 558 if ((beio->bio_cmd == BIO_WRITE) 559 || (beio->bio_cmd == BIO_FLUSH) 560 || (beio->bio_cmd == BIO_DELETE) 561 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 562 ctl_set_success(&io->scsiio); 563 ctl_complete_beio(beio); 564 } else { 565 if ((ARGS(io)->flags & CTL_LLF_READ) && 566 beio->beio_cont == NULL) { 567 ctl_set_success(&io->scsiio); 568 ctl_serseq_done(io); 569 } 570 #ifdef CTL_TIME_IO 571 getbinuptime(&io->io_hdr.dma_start_bt); 572 #endif 573 ctl_datamove(io); 574 } 575 } 576 577 static void 578 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 579 struct ctl_be_block_io *beio) 580 { 581 union ctl_io *io = beio->io; 582 struct mount *mountpoint; 583 int error, lock_flags; 584 585 DPRINTF("entered\n"); 586 587 binuptime(&beio->ds_t0); 588 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 589 590 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 591 592 if (MNT_SHARED_WRITES(mountpoint) || 593 ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 594 lock_flags = LK_SHARED; 595 else 596 lock_flags = LK_EXCLUSIVE; 597 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 598 error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT, 599 curthread); 600 VOP_UNLOCK(be_lun->vn); 601 602 vn_finished_write(mountpoint); 603 604 mtx_lock(&be_lun->io_lock); 605 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 606 beio->ds_tag_type, beio->ds_trans_type, 607 /*now*/ NULL, /*then*/&beio->ds_t0); 608 mtx_unlock(&be_lun->io_lock); 609 610 if (error == 0) 611 ctl_set_success(&io->scsiio); 612 else { 613 /* XXX KDM is there is a better error here? */ 614 ctl_set_internal_failure(&io->scsiio, 615 /*sks_valid*/ 1, 616 /*retry_count*/ 0xbad1); 617 } 618 619 ctl_complete_beio(beio); 620 } 621 622 SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t"); 623 SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t"); 624 SDT_PROBE_DEFINE1(cbb, , read, file_done,"uint64_t"); 625 SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t"); 626 627 static void 628 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 629 struct ctl_be_block_io *beio) 630 { 631 struct ctl_be_block_filedata *file_data; 632 union ctl_io *io; 633 struct uio xuio; 634 struct iovec *xiovec; 635 size_t s; 636 int error, flags, i; 637 638 DPRINTF("entered\n"); 639 640 file_data = &be_lun->backend.file; 641 io = beio->io; 642 flags = 0; 643 if (ARGS(io)->flags & CTL_LLF_DPO) 644 flags |= IO_DIRECT; 645 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 646 flags |= IO_SYNC; 647 648 bzero(&xuio, sizeof(xuio)); 649 if (beio->bio_cmd == BIO_READ) { 650 SDT_PROBE0(cbb, , read, file_start); 651 xuio.uio_rw = UIO_READ; 652 } else { 653 SDT_PROBE0(cbb, , write, file_start); 654 xuio.uio_rw = UIO_WRITE; 655 } 656 xuio.uio_offset = beio->io_offset; 657 xuio.uio_resid = beio->io_len; 658 xuio.uio_segflg = UIO_SYSSPACE; 659 xuio.uio_iov = beio->xiovecs; 660 xuio.uio_iovcnt = beio->num_segs; 661 xuio.uio_td = curthread; 662 663 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 664 xiovec->iov_base = beio->sg_segs[i].addr; 665 xiovec->iov_len = beio->sg_segs[i].len; 666 } 667 668 binuptime(&beio->ds_t0); 669 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 670 671 if (beio->bio_cmd == BIO_READ) { 672 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 673 674 /* 675 * UFS pays attention to IO_DIRECT for reads. If the 676 * DIRECTIO option is configured into the kernel, it calls 677 * ffs_rawread(). But that only works for single-segment 678 * uios with user space addresses. In our case, with a 679 * kernel uio, it still reads into the buffer cache, but it 680 * will just try to release the buffer from the cache later 681 * on in ffs_read(). 682 * 683 * ZFS does not pay attention to IO_DIRECT for reads. 684 * 685 * UFS does not pay attention to IO_SYNC for reads. 686 * 687 * ZFS pays attention to IO_SYNC (which translates into the 688 * Solaris define FRSYNC for zfs_read()) for reads. It 689 * attempts to sync the file before reading. 690 */ 691 error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); 692 693 VOP_UNLOCK(be_lun->vn); 694 SDT_PROBE0(cbb, , read, file_done); 695 if (error == 0 && xuio.uio_resid > 0) { 696 /* 697 * If we red less then requested (EOF), then 698 * we should clean the rest of the buffer. 699 */ 700 s = beio->io_len - xuio.uio_resid; 701 for (i = 0; i < beio->num_segs; i++) { 702 if (s >= beio->sg_segs[i].len) { 703 s -= beio->sg_segs[i].len; 704 continue; 705 } 706 bzero((uint8_t *)beio->sg_segs[i].addr + s, 707 beio->sg_segs[i].len - s); 708 s = 0; 709 } 710 } 711 } else { 712 struct mount *mountpoint; 713 int lock_flags; 714 715 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 716 717 if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) 718 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 719 lock_flags = LK_SHARED; 720 else 721 lock_flags = LK_EXCLUSIVE; 722 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 723 724 /* 725 * UFS pays attention to IO_DIRECT for writes. The write 726 * is done asynchronously. (Normally the write would just 727 * get put into cache. 728 * 729 * UFS pays attention to IO_SYNC for writes. It will 730 * attempt to write the buffer out synchronously if that 731 * flag is set. 732 * 733 * ZFS does not pay attention to IO_DIRECT for writes. 734 * 735 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 736 * for writes. It will flush the transaction from the 737 * cache before returning. 738 */ 739 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); 740 VOP_UNLOCK(be_lun->vn); 741 742 vn_finished_write(mountpoint); 743 SDT_PROBE0(cbb, , write, file_done); 744 } 745 746 mtx_lock(&be_lun->io_lock); 747 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 748 beio->ds_tag_type, beio->ds_trans_type, 749 /*now*/ NULL, /*then*/&beio->ds_t0); 750 mtx_unlock(&be_lun->io_lock); 751 752 /* 753 * If we got an error, set the sense data to "MEDIUM ERROR" and 754 * return the I/O to the user. 755 */ 756 if (error != 0) { 757 if (error == ENOSPC || error == EDQUOT) { 758 ctl_set_space_alloc_fail(&io->scsiio); 759 } else if (error == EROFS || error == EACCES) { 760 ctl_set_hw_write_protected(&io->scsiio); 761 } else { 762 ctl_set_medium_error(&io->scsiio, 763 beio->bio_cmd == BIO_READ); 764 } 765 ctl_complete_beio(beio); 766 return; 767 } 768 769 /* 770 * If this is a write or a verify, we're all done. 771 * If this is a read, we can now send the data to the user. 772 */ 773 if ((beio->bio_cmd == BIO_WRITE) || 774 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 775 ctl_set_success(&io->scsiio); 776 ctl_complete_beio(beio); 777 } else { 778 if ((ARGS(io)->flags & CTL_LLF_READ) && 779 beio->beio_cont == NULL) { 780 ctl_set_success(&io->scsiio); 781 ctl_serseq_done(io); 782 } 783 #ifdef CTL_TIME_IO 784 getbinuptime(&io->io_hdr.dma_start_bt); 785 #endif 786 ctl_datamove(io); 787 } 788 } 789 790 static void 791 ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, 792 struct ctl_be_block_io *beio) 793 { 794 union ctl_io *io = beio->io; 795 struct ctl_lba_len_flags *lbalen = ARGS(io); 796 struct scsi_get_lba_status_data *data; 797 off_t roff, off; 798 int error, status; 799 800 DPRINTF("entered\n"); 801 802 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; 803 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 804 error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off, 805 0, curthread->td_ucred, curthread); 806 if (error == 0 && off > roff) 807 status = 0; /* mapped up to off */ 808 else { 809 error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off, 810 0, curthread->td_ucred, curthread); 811 if (error == 0 && off > roff) 812 status = 1; /* deallocated up to off */ 813 else { 814 status = 0; /* unknown up to the end */ 815 off = be_lun->size_bytes; 816 } 817 } 818 VOP_UNLOCK(be_lun->vn); 819 820 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; 821 scsi_u64to8b(lbalen->lba, data->descr[0].addr); 822 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - 823 lbalen->lba), data->descr[0].length); 824 data->descr[0].status = status; 825 826 ctl_complete_beio(beio); 827 } 828 829 static uint64_t 830 ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) 831 { 832 struct vattr vattr; 833 struct statfs statfs; 834 uint64_t val; 835 int error; 836 837 val = UINT64_MAX; 838 if (be_lun->vn == NULL) 839 return (val); 840 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 841 if (strcmp(attrname, "blocksused") == 0) { 842 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 843 if (error == 0) 844 val = vattr.va_bytes / be_lun->cbe_lun.blocksize; 845 } 846 if (strcmp(attrname, "blocksavail") == 0 && 847 !VN_IS_DOOMED(be_lun->vn)) { 848 error = VFS_STATFS(be_lun->vn->v_mount, &statfs); 849 if (error == 0) 850 val = statfs.f_bavail * statfs.f_bsize / 851 be_lun->cbe_lun.blocksize; 852 } 853 VOP_UNLOCK(be_lun->vn); 854 return (val); 855 } 856 857 static void 858 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 859 struct ctl_be_block_io *beio) 860 { 861 union ctl_io *io; 862 struct cdevsw *csw; 863 struct cdev *dev; 864 struct uio xuio; 865 struct iovec *xiovec; 866 int error, flags, i, ref; 867 868 DPRINTF("entered\n"); 869 870 io = beio->io; 871 flags = 0; 872 if (ARGS(io)->flags & CTL_LLF_DPO) 873 flags |= IO_DIRECT; 874 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 875 flags |= IO_SYNC; 876 877 bzero(&xuio, sizeof(xuio)); 878 if (beio->bio_cmd == BIO_READ) { 879 SDT_PROBE0(cbb, , read, file_start); 880 xuio.uio_rw = UIO_READ; 881 } else { 882 SDT_PROBE0(cbb, , write, file_start); 883 xuio.uio_rw = UIO_WRITE; 884 } 885 xuio.uio_offset = beio->io_offset; 886 xuio.uio_resid = beio->io_len; 887 xuio.uio_segflg = UIO_SYSSPACE; 888 xuio.uio_iov = beio->xiovecs; 889 xuio.uio_iovcnt = beio->num_segs; 890 xuio.uio_td = curthread; 891 892 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 893 xiovec->iov_base = beio->sg_segs[i].addr; 894 xiovec->iov_len = beio->sg_segs[i].len; 895 } 896 897 binuptime(&beio->ds_t0); 898 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 899 900 csw = devvn_refthread(be_lun->vn, &dev, &ref); 901 if (csw) { 902 if (beio->bio_cmd == BIO_READ) 903 error = csw->d_read(dev, &xuio, flags); 904 else 905 error = csw->d_write(dev, &xuio, flags); 906 dev_relthread(dev, ref); 907 } else 908 error = ENXIO; 909 910 if (beio->bio_cmd == BIO_READ) 911 SDT_PROBE0(cbb, , read, file_done); 912 else 913 SDT_PROBE0(cbb, , write, file_done); 914 915 mtx_lock(&be_lun->io_lock); 916 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 917 beio->ds_tag_type, beio->ds_trans_type, 918 /*now*/ NULL, /*then*/&beio->ds_t0); 919 mtx_unlock(&be_lun->io_lock); 920 921 /* 922 * If we got an error, set the sense data to "MEDIUM ERROR" and 923 * return the I/O to the user. 924 */ 925 if (error != 0) { 926 if (error == ENOSPC || error == EDQUOT) { 927 ctl_set_space_alloc_fail(&io->scsiio); 928 } else if (error == EROFS || error == EACCES) { 929 ctl_set_hw_write_protected(&io->scsiio); 930 } else { 931 ctl_set_medium_error(&io->scsiio, 932 beio->bio_cmd == BIO_READ); 933 } 934 ctl_complete_beio(beio); 935 return; 936 } 937 938 /* 939 * If this is a write or a verify, we're all done. 940 * If this is a read, we can now send the data to the user. 941 */ 942 if ((beio->bio_cmd == BIO_WRITE) || 943 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 944 ctl_set_success(&io->scsiio); 945 ctl_complete_beio(beio); 946 } else { 947 if ((ARGS(io)->flags & CTL_LLF_READ) && 948 beio->beio_cont == NULL) { 949 ctl_set_success(&io->scsiio); 950 ctl_serseq_done(io); 951 } 952 #ifdef CTL_TIME_IO 953 getbinuptime(&io->io_hdr.dma_start_bt); 954 #endif 955 ctl_datamove(io); 956 } 957 } 958 959 static void 960 ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun, 961 struct ctl_be_block_io *beio) 962 { 963 union ctl_io *io = beio->io; 964 struct cdevsw *csw; 965 struct cdev *dev; 966 struct ctl_lba_len_flags *lbalen = ARGS(io); 967 struct scsi_get_lba_status_data *data; 968 off_t roff, off; 969 int error, ref, status; 970 971 DPRINTF("entered\n"); 972 973 csw = devvn_refthread(be_lun->vn, &dev, &ref); 974 if (csw == NULL) { 975 status = 0; /* unknown up to the end */ 976 off = be_lun->size_bytes; 977 goto done; 978 } 979 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; 980 error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD, 981 curthread); 982 if (error == 0 && off > roff) 983 status = 0; /* mapped up to off */ 984 else { 985 error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD, 986 curthread); 987 if (error == 0 && off > roff) 988 status = 1; /* deallocated up to off */ 989 else { 990 status = 0; /* unknown up to the end */ 991 off = be_lun->size_bytes; 992 } 993 } 994 dev_relthread(dev, ref); 995 996 done: 997 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; 998 scsi_u64to8b(lbalen->lba, data->descr[0].addr); 999 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - 1000 lbalen->lba), data->descr[0].length); 1001 data->descr[0].status = status; 1002 1003 ctl_complete_beio(beio); 1004 } 1005 1006 static void 1007 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 1008 struct ctl_be_block_io *beio) 1009 { 1010 struct bio *bio; 1011 struct cdevsw *csw; 1012 struct cdev *dev; 1013 int ref; 1014 1015 DPRINTF("entered\n"); 1016 1017 /* This can't fail, it's a blocking allocation. */ 1018 bio = g_alloc_bio(); 1019 1020 bio->bio_cmd = BIO_FLUSH; 1021 bio->bio_offset = 0; 1022 bio->bio_data = 0; 1023 bio->bio_done = ctl_be_block_biodone; 1024 bio->bio_caller1 = beio; 1025 bio->bio_pblkno = 0; 1026 1027 /* 1028 * We don't need to acquire the LUN lock here, because we are only 1029 * sending one bio, and so there is no other context to synchronize 1030 * with. 1031 */ 1032 beio->num_bios_sent = 1; 1033 beio->send_complete = 1; 1034 1035 binuptime(&beio->ds_t0); 1036 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1037 1038 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1039 if (csw) { 1040 bio->bio_dev = dev; 1041 csw->d_strategy(bio); 1042 dev_relthread(dev, ref); 1043 } else { 1044 bio->bio_error = ENXIO; 1045 ctl_be_block_biodone(bio); 1046 } 1047 } 1048 1049 static void 1050 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 1051 struct ctl_be_block_io *beio, 1052 uint64_t off, uint64_t len, int last) 1053 { 1054 struct bio *bio; 1055 uint64_t maxlen; 1056 struct cdevsw *csw; 1057 struct cdev *dev; 1058 int ref; 1059 1060 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1061 maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize); 1062 while (len > 0) { 1063 bio = g_alloc_bio(); 1064 bio->bio_cmd = BIO_DELETE; 1065 bio->bio_dev = dev; 1066 bio->bio_offset = off; 1067 bio->bio_length = MIN(len, maxlen); 1068 bio->bio_data = 0; 1069 bio->bio_done = ctl_be_block_biodone; 1070 bio->bio_caller1 = beio; 1071 bio->bio_pblkno = off / be_lun->cbe_lun.blocksize; 1072 1073 off += bio->bio_length; 1074 len -= bio->bio_length; 1075 1076 mtx_lock(&be_lun->io_lock); 1077 beio->num_bios_sent++; 1078 if (last && len == 0) 1079 beio->send_complete = 1; 1080 mtx_unlock(&be_lun->io_lock); 1081 1082 if (csw) { 1083 csw->d_strategy(bio); 1084 } else { 1085 bio->bio_error = ENXIO; 1086 ctl_be_block_biodone(bio); 1087 } 1088 } 1089 if (csw) 1090 dev_relthread(dev, ref); 1091 } 1092 1093 static void 1094 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 1095 struct ctl_be_block_io *beio) 1096 { 1097 union ctl_io *io; 1098 struct ctl_ptr_len_flags *ptrlen; 1099 struct scsi_unmap_desc *buf, *end; 1100 uint64_t len; 1101 1102 io = beio->io; 1103 1104 DPRINTF("entered\n"); 1105 1106 binuptime(&beio->ds_t0); 1107 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1108 1109 if (beio->io_offset == -1) { 1110 beio->io_len = 0; 1111 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1112 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 1113 end = buf + ptrlen->len / sizeof(*buf); 1114 for (; buf < end; buf++) { 1115 len = (uint64_t)scsi_4btoul(buf->length) * 1116 be_lun->cbe_lun.blocksize; 1117 beio->io_len += len; 1118 ctl_be_block_unmap_dev_range(be_lun, beio, 1119 scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize, 1120 len, (end - buf < 2) ? TRUE : FALSE); 1121 } 1122 } else 1123 ctl_be_block_unmap_dev_range(be_lun, beio, 1124 beio->io_offset, beio->io_len, TRUE); 1125 } 1126 1127 static void 1128 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 1129 struct ctl_be_block_io *beio) 1130 { 1131 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 1132 struct bio *bio; 1133 struct cdevsw *csw; 1134 struct cdev *dev; 1135 off_t cur_offset; 1136 int i, max_iosize, ref; 1137 1138 DPRINTF("entered\n"); 1139 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1140 1141 /* 1142 * We have to limit our I/O size to the maximum supported by the 1143 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 1144 * set it properly, use DFLTPHYS. 1145 */ 1146 if (csw) { 1147 max_iosize = dev->si_iosize_max; 1148 if (max_iosize < PAGE_SIZE) 1149 max_iosize = DFLTPHYS; 1150 } else 1151 max_iosize = DFLTPHYS; 1152 1153 cur_offset = beio->io_offset; 1154 for (i = 0; i < beio->num_segs; i++) { 1155 size_t cur_size; 1156 uint8_t *cur_ptr; 1157 1158 cur_size = beio->sg_segs[i].len; 1159 cur_ptr = beio->sg_segs[i].addr; 1160 1161 while (cur_size > 0) { 1162 /* This can't fail, it's a blocking allocation. */ 1163 bio = g_alloc_bio(); 1164 1165 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 1166 1167 bio->bio_cmd = beio->bio_cmd; 1168 bio->bio_dev = dev; 1169 bio->bio_caller1 = beio; 1170 bio->bio_length = min(cur_size, max_iosize); 1171 bio->bio_offset = cur_offset; 1172 bio->bio_data = cur_ptr; 1173 bio->bio_done = ctl_be_block_biodone; 1174 bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize; 1175 1176 cur_offset += bio->bio_length; 1177 cur_ptr += bio->bio_length; 1178 cur_size -= bio->bio_length; 1179 1180 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 1181 beio->num_bios_sent++; 1182 } 1183 } 1184 beio->send_complete = 1; 1185 binuptime(&beio->ds_t0); 1186 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1187 1188 /* 1189 * Fire off all allocated requests! 1190 */ 1191 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1192 TAILQ_REMOVE(&queue, bio, bio_queue); 1193 if (csw) 1194 csw->d_strategy(bio); 1195 else { 1196 bio->bio_error = ENXIO; 1197 ctl_be_block_biodone(bio); 1198 } 1199 } 1200 if (csw) 1201 dev_relthread(dev, ref); 1202 } 1203 1204 static uint64_t 1205 ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname) 1206 { 1207 struct diocgattr_arg arg; 1208 struct cdevsw *csw; 1209 struct cdev *dev; 1210 int error, ref; 1211 1212 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1213 if (csw == NULL) 1214 return (UINT64_MAX); 1215 strlcpy(arg.name, attrname, sizeof(arg.name)); 1216 arg.len = sizeof(arg.value.off); 1217 if (csw->d_ioctl) { 1218 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, 1219 curthread); 1220 } else 1221 error = ENODEV; 1222 dev_relthread(dev, ref); 1223 if (error != 0) 1224 return (UINT64_MAX); 1225 return (arg.value.off); 1226 } 1227 1228 static void 1229 ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun, 1230 union ctl_io *io) 1231 { 1232 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1233 struct ctl_be_block_io *beio; 1234 struct ctl_lba_len_flags *lbalen; 1235 1236 DPRINTF("entered\n"); 1237 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1238 lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1239 1240 beio->io_len = lbalen->len * cbe_lun->blocksize; 1241 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1242 beio->io_arg = (lbalen->flags & SSC_IMMED) != 0; 1243 beio->bio_cmd = BIO_FLUSH; 1244 beio->ds_trans_type = DEVSTAT_NO_DATA; 1245 DPRINTF("SYNC\n"); 1246 be_lun->lun_flush(be_lun, beio); 1247 } 1248 1249 static void 1250 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1251 { 1252 union ctl_io *io; 1253 1254 io = beio->io; 1255 ctl_free_beio(beio); 1256 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1257 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1258 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1259 ctl_config_write_done(io); 1260 return; 1261 } 1262 1263 ctl_be_block_config_write(io); 1264 } 1265 1266 static void 1267 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1268 union ctl_io *io) 1269 { 1270 struct ctl_be_block_softc *softc = be_lun->softc; 1271 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1272 struct ctl_be_block_io *beio; 1273 struct ctl_lba_len_flags *lbalen; 1274 uint64_t len_left, lba; 1275 uint32_t pb, pbo, adj; 1276 int i, seglen; 1277 uint8_t *buf, *end; 1278 1279 DPRINTF("entered\n"); 1280 1281 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1282 lbalen = ARGS(beio->io); 1283 1284 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || 1285 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1286 ctl_free_beio(beio); 1287 ctl_set_invalid_field(&io->scsiio, 1288 /*sks_valid*/ 1, 1289 /*command*/ 1, 1290 /*field*/ 1, 1291 /*bit_valid*/ 0, 1292 /*bit*/ 0); 1293 ctl_config_write_done(io); 1294 return; 1295 } 1296 1297 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1298 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1299 beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize; 1300 beio->bio_cmd = BIO_DELETE; 1301 beio->ds_trans_type = DEVSTAT_FREE; 1302 1303 be_lun->unmap(be_lun, beio); 1304 return; 1305 } 1306 1307 beio->bio_cmd = BIO_WRITE; 1308 beio->ds_trans_type = DEVSTAT_WRITE; 1309 1310 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1311 (uintmax_t)lbalen->lba, lbalen->len); 1312 1313 pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp; 1314 if (be_lun->cbe_lun.pblockoff > 0) 1315 pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff; 1316 else 1317 pbo = 0; 1318 len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; 1319 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1320 1321 /* 1322 * Setup the S/G entry for this chunk. 1323 */ 1324 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1325 if (pb > cbe_lun->blocksize) { 1326 adj = ((lbalen->lba + lba) * cbe_lun->blocksize + 1327 seglen - pbo) % pb; 1328 if (seglen > adj) 1329 seglen -= adj; 1330 else 1331 seglen -= seglen % cbe_lun->blocksize; 1332 } else 1333 seglen -= seglen % cbe_lun->blocksize; 1334 beio->sg_segs[i].len = seglen; 1335 beio->sg_segs[i].addr = uma_zalloc(softc->buf_zone, M_WAITOK); 1336 1337 DPRINTF("segment %d addr %p len %zd\n", i, 1338 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1339 1340 beio->num_segs++; 1341 len_left -= seglen; 1342 1343 buf = beio->sg_segs[i].addr; 1344 end = buf + seglen; 1345 for (; buf < end; buf += cbe_lun->blocksize) { 1346 if (lbalen->flags & SWS_NDOB) { 1347 memset(buf, 0, cbe_lun->blocksize); 1348 } else { 1349 memcpy(buf, io->scsiio.kern_data_ptr, 1350 cbe_lun->blocksize); 1351 } 1352 if (lbalen->flags & SWS_LBDATA) 1353 scsi_ulto4b(lbalen->lba + lba, buf); 1354 lba++; 1355 } 1356 } 1357 1358 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1359 beio->io_len = lba * cbe_lun->blocksize; 1360 1361 /* We can not do all in one run. Correct and schedule rerun. */ 1362 if (len_left > 0) { 1363 lbalen->lba += lba; 1364 lbalen->len -= lba; 1365 beio->beio_cont = ctl_be_block_cw_done_ws; 1366 } 1367 1368 be_lun->dispatch(be_lun, beio); 1369 } 1370 1371 static void 1372 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1373 union ctl_io *io) 1374 { 1375 struct ctl_be_block_io *beio; 1376 struct ctl_ptr_len_flags *ptrlen; 1377 1378 DPRINTF("entered\n"); 1379 1380 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1381 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1382 1383 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1384 ctl_free_beio(beio); 1385 ctl_set_invalid_field(&io->scsiio, 1386 /*sks_valid*/ 0, 1387 /*command*/ 1, 1388 /*field*/ 0, 1389 /*bit_valid*/ 0, 1390 /*bit*/ 0); 1391 ctl_config_write_done(io); 1392 return; 1393 } 1394 1395 beio->io_len = 0; 1396 beio->io_offset = -1; 1397 beio->bio_cmd = BIO_DELETE; 1398 beio->ds_trans_type = DEVSTAT_FREE; 1399 DPRINTF("UNMAP\n"); 1400 be_lun->unmap(be_lun, beio); 1401 } 1402 1403 static void 1404 ctl_be_block_cr_done(struct ctl_be_block_io *beio) 1405 { 1406 union ctl_io *io; 1407 1408 io = beio->io; 1409 ctl_free_beio(beio); 1410 ctl_config_read_done(io); 1411 } 1412 1413 static void 1414 ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, 1415 union ctl_io *io) 1416 { 1417 struct ctl_be_block_io *beio; 1418 struct ctl_be_block_softc *softc; 1419 1420 DPRINTF("entered\n"); 1421 1422 softc = be_lun->softc; 1423 beio = ctl_alloc_beio(softc); 1424 beio->io = io; 1425 beio->lun = be_lun; 1426 beio->beio_cont = ctl_be_block_cr_done; 1427 PRIV(io)->ptr = (void *)beio; 1428 1429 switch (io->scsiio.cdb[0]) { 1430 case SERVICE_ACTION_IN: /* GET LBA STATUS */ 1431 beio->bio_cmd = -1; 1432 beio->ds_trans_type = DEVSTAT_NO_DATA; 1433 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1434 beio->io_len = 0; 1435 if (be_lun->get_lba_status) 1436 be_lun->get_lba_status(be_lun, beio); 1437 else 1438 ctl_be_block_cr_done(beio); 1439 break; 1440 default: 1441 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1442 break; 1443 } 1444 } 1445 1446 static void 1447 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1448 { 1449 union ctl_io *io; 1450 1451 io = beio->io; 1452 ctl_free_beio(beio); 1453 ctl_config_write_done(io); 1454 } 1455 1456 static void 1457 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1458 union ctl_io *io) 1459 { 1460 struct ctl_be_block_io *beio; 1461 struct ctl_be_block_softc *softc; 1462 1463 DPRINTF("entered\n"); 1464 1465 softc = be_lun->softc; 1466 beio = ctl_alloc_beio(softc); 1467 beio->io = io; 1468 beio->lun = be_lun; 1469 beio->beio_cont = ctl_be_block_cw_done; 1470 switch (io->scsiio.tag_type) { 1471 case CTL_TAG_ORDERED: 1472 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1473 break; 1474 case CTL_TAG_HEAD_OF_QUEUE: 1475 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1476 break; 1477 case CTL_TAG_UNTAGGED: 1478 case CTL_TAG_SIMPLE: 1479 case CTL_TAG_ACA: 1480 default: 1481 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1482 break; 1483 } 1484 PRIV(io)->ptr = (void *)beio; 1485 1486 switch (io->scsiio.cdb[0]) { 1487 case SYNCHRONIZE_CACHE: 1488 case SYNCHRONIZE_CACHE_16: 1489 ctl_be_block_cw_dispatch_sync(be_lun, io); 1490 break; 1491 case WRITE_SAME_10: 1492 case WRITE_SAME_16: 1493 ctl_be_block_cw_dispatch_ws(be_lun, io); 1494 break; 1495 case UNMAP: 1496 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1497 break; 1498 default: 1499 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1500 break; 1501 } 1502 } 1503 1504 SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t"); 1505 SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t"); 1506 SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t"); 1507 SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t"); 1508 1509 static void 1510 ctl_be_block_next(struct ctl_be_block_io *beio) 1511 { 1512 struct ctl_be_block_lun *be_lun; 1513 union ctl_io *io; 1514 1515 io = beio->io; 1516 be_lun = beio->lun; 1517 ctl_free_beio(beio); 1518 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1519 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1520 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1521 ctl_data_submit_done(io); 1522 return; 1523 } 1524 1525 io->io_hdr.status &= ~CTL_STATUS_MASK; 1526 io->io_hdr.status |= CTL_STATUS_NONE; 1527 1528 mtx_lock(&be_lun->queue_lock); 1529 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1530 mtx_unlock(&be_lun->queue_lock); 1531 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1532 } 1533 1534 static void 1535 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1536 union ctl_io *io) 1537 { 1538 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1539 struct ctl_be_block_io *beio; 1540 struct ctl_be_block_softc *softc; 1541 struct ctl_lba_len_flags *lbalen; 1542 struct ctl_ptr_len_flags *bptrlen; 1543 uint64_t len_left, lbas; 1544 int i; 1545 1546 softc = be_lun->softc; 1547 1548 DPRINTF("entered\n"); 1549 1550 lbalen = ARGS(io); 1551 if (lbalen->flags & CTL_LLF_WRITE) { 1552 SDT_PROBE0(cbb, , write, start); 1553 } else { 1554 SDT_PROBE0(cbb, , read, start); 1555 } 1556 1557 beio = ctl_alloc_beio(softc); 1558 beio->io = io; 1559 beio->lun = be_lun; 1560 bptrlen = PRIV(io); 1561 bptrlen->ptr = (void *)beio; 1562 1563 switch (io->scsiio.tag_type) { 1564 case CTL_TAG_ORDERED: 1565 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1566 break; 1567 case CTL_TAG_HEAD_OF_QUEUE: 1568 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1569 break; 1570 case CTL_TAG_UNTAGGED: 1571 case CTL_TAG_SIMPLE: 1572 case CTL_TAG_ACA: 1573 default: 1574 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1575 break; 1576 } 1577 1578 if (lbalen->flags & CTL_LLF_WRITE) { 1579 beio->bio_cmd = BIO_WRITE; 1580 beio->ds_trans_type = DEVSTAT_WRITE; 1581 } else { 1582 beio->bio_cmd = BIO_READ; 1583 beio->ds_trans_type = DEVSTAT_READ; 1584 } 1585 1586 DPRINTF("%s at LBA %jx len %u @%ju\n", 1587 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1588 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1589 if (lbalen->flags & CTL_LLF_COMPARE) { 1590 beio->two_sglists = 1; 1591 lbas = CTLBLK_HALF_IO_SIZE; 1592 } else { 1593 lbas = CTLBLK_MAX_IO_SIZE; 1594 } 1595 lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize); 1596 beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize; 1597 beio->io_len = lbas * cbe_lun->blocksize; 1598 bptrlen->len += lbas; 1599 1600 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1601 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1602 i, CTLBLK_MAX_SEGS)); 1603 1604 /* 1605 * Setup the S/G entry for this chunk. 1606 */ 1607 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1608 beio->sg_segs[i].addr = uma_zalloc(softc->buf_zone, M_WAITOK); 1609 1610 DPRINTF("segment %d addr %p len %zd\n", i, 1611 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1612 1613 /* Set up second segment for compare operation. */ 1614 if (beio->two_sglists) { 1615 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1616 beio->sg_segs[i].len; 1617 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1618 uma_zalloc(softc->buf_zone, M_WAITOK); 1619 } 1620 1621 beio->num_segs++; 1622 len_left -= beio->sg_segs[i].len; 1623 } 1624 if (bptrlen->len < lbalen->len) 1625 beio->beio_cont = ctl_be_block_next; 1626 io->scsiio.be_move_done = ctl_be_block_move_done; 1627 /* For compare we have separate S/G lists for read and datamove. */ 1628 if (beio->two_sglists) 1629 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1630 else 1631 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1632 io->scsiio.kern_data_len = beio->io_len; 1633 io->scsiio.kern_sg_entries = beio->num_segs; 1634 io->scsiio.kern_data_ref = ctl_refcnt_beio; 1635 io->scsiio.kern_data_arg = beio; 1636 io->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1637 1638 /* 1639 * For the read case, we need to read the data into our buffers and 1640 * then we can send it back to the user. For the write case, we 1641 * need to get the data from the user first. 1642 */ 1643 if (beio->bio_cmd == BIO_READ) { 1644 SDT_PROBE0(cbb, , read, alloc_done); 1645 be_lun->dispatch(be_lun, beio); 1646 } else { 1647 SDT_PROBE0(cbb, , write, alloc_done); 1648 #ifdef CTL_TIME_IO 1649 getbinuptime(&io->io_hdr.dma_start_bt); 1650 #endif 1651 ctl_datamove(io); 1652 } 1653 } 1654 1655 static void 1656 ctl_be_block_worker(void *context, int pending) 1657 { 1658 struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context; 1659 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1660 union ctl_io *io; 1661 struct ctl_be_block_io *beio; 1662 1663 DPRINTF("entered\n"); 1664 /* 1665 * Fetch and process I/Os from all queues. If we detect LUN 1666 * CTL_LUN_FLAG_NO_MEDIA status here -- it is result of a race, 1667 * so make response maximally opaque to not confuse initiator. 1668 */ 1669 for (;;) { 1670 mtx_lock(&be_lun->queue_lock); 1671 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1672 if (io != NULL) { 1673 DPRINTF("datamove queue\n"); 1674 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1675 ctl_io_hdr, links); 1676 mtx_unlock(&be_lun->queue_lock); 1677 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1678 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1679 ctl_set_busy(&io->scsiio); 1680 ctl_complete_beio(beio); 1681 return; 1682 } 1683 be_lun->dispatch(be_lun, beio); 1684 continue; 1685 } 1686 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1687 if (io != NULL) { 1688 DPRINTF("config write queue\n"); 1689 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1690 ctl_io_hdr, links); 1691 mtx_unlock(&be_lun->queue_lock); 1692 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1693 ctl_set_busy(&io->scsiio); 1694 ctl_config_write_done(io); 1695 return; 1696 } 1697 ctl_be_block_cw_dispatch(be_lun, io); 1698 continue; 1699 } 1700 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue); 1701 if (io != NULL) { 1702 DPRINTF("config read queue\n"); 1703 STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr, 1704 ctl_io_hdr, links); 1705 mtx_unlock(&be_lun->queue_lock); 1706 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1707 ctl_set_busy(&io->scsiio); 1708 ctl_config_read_done(io); 1709 return; 1710 } 1711 ctl_be_block_cr_dispatch(be_lun, io); 1712 continue; 1713 } 1714 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1715 if (io != NULL) { 1716 DPRINTF("input queue\n"); 1717 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1718 ctl_io_hdr, links); 1719 mtx_unlock(&be_lun->queue_lock); 1720 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1721 ctl_set_busy(&io->scsiio); 1722 ctl_data_submit_done(io); 1723 return; 1724 } 1725 ctl_be_block_dispatch(be_lun, io); 1726 continue; 1727 } 1728 1729 /* 1730 * If we get here, there is no work left in the queues, so 1731 * just break out and let the task queue go to sleep. 1732 */ 1733 mtx_unlock(&be_lun->queue_lock); 1734 break; 1735 } 1736 } 1737 1738 /* 1739 * Entry point from CTL to the backend for I/O. We queue everything to a 1740 * work thread, so this just puts the I/O on a queue and wakes up the 1741 * thread. 1742 */ 1743 static int 1744 ctl_be_block_submit(union ctl_io *io) 1745 { 1746 struct ctl_be_block_lun *be_lun; 1747 1748 DPRINTF("entered\n"); 1749 1750 be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); 1751 1752 /* 1753 * Make sure we only get SCSI I/O. 1754 */ 1755 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1756 "%#x) encountered", io->io_hdr.io_type)); 1757 1758 PRIV(io)->len = 0; 1759 1760 mtx_lock(&be_lun->queue_lock); 1761 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1762 mtx_unlock(&be_lun->queue_lock); 1763 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1764 1765 return (CTL_RETVAL_COMPLETE); 1766 } 1767 1768 static int 1769 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1770 int flag, struct thread *td) 1771 { 1772 struct ctl_be_block_softc *softc = &backend_block_softc; 1773 int error; 1774 1775 error = 0; 1776 switch (cmd) { 1777 case CTL_LUN_REQ: { 1778 struct ctl_lun_req *lun_req; 1779 1780 lun_req = (struct ctl_lun_req *)addr; 1781 1782 switch (lun_req->reqtype) { 1783 case CTL_LUNREQ_CREATE: 1784 error = ctl_be_block_create(softc, lun_req); 1785 break; 1786 case CTL_LUNREQ_RM: 1787 error = ctl_be_block_rm(softc, lun_req); 1788 break; 1789 case CTL_LUNREQ_MODIFY: 1790 error = ctl_be_block_modify(softc, lun_req); 1791 break; 1792 default: 1793 lun_req->status = CTL_LUN_ERROR; 1794 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1795 "invalid LUN request type %d", 1796 lun_req->reqtype); 1797 break; 1798 } 1799 break; 1800 } 1801 default: 1802 error = ENOTTY; 1803 break; 1804 } 1805 1806 return (error); 1807 } 1808 1809 static int 1810 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1811 { 1812 struct ctl_be_lun *cbe_lun; 1813 struct ctl_be_block_filedata *file_data; 1814 struct ctl_lun_create_params *params; 1815 const char *value; 1816 struct vattr vattr; 1817 off_t ps, pss, po, pos, us, uss, uo, uos; 1818 int error; 1819 1820 cbe_lun = &be_lun->cbe_lun; 1821 file_data = &be_lun->backend.file; 1822 params = &be_lun->params; 1823 1824 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1825 be_lun->dispatch = ctl_be_block_dispatch_file; 1826 be_lun->lun_flush = ctl_be_block_flush_file; 1827 be_lun->get_lba_status = ctl_be_block_gls_file; 1828 be_lun->getattr = ctl_be_block_getattr_file; 1829 be_lun->unmap = NULL; 1830 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; 1831 1832 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1833 if (error != 0) { 1834 snprintf(req->error_str, sizeof(req->error_str), 1835 "error calling VOP_GETATTR() for file %s", 1836 be_lun->dev_path); 1837 return (error); 1838 } 1839 1840 file_data->cred = crhold(curthread->td_ucred); 1841 if (params->lun_size_bytes != 0) 1842 be_lun->size_bytes = params->lun_size_bytes; 1843 else 1844 be_lun->size_bytes = vattr.va_size; 1845 1846 /* 1847 * For files we can use any logical block size. Prefer 512 bytes 1848 * for compatibility reasons. If file's vattr.va_blocksize 1849 * (preferred I/O block size) is bigger and multiple to chosen 1850 * logical block size -- report it as physical block size. 1851 */ 1852 if (params->blocksize_bytes != 0) 1853 cbe_lun->blocksize = params->blocksize_bytes; 1854 else if (cbe_lun->lun_type == T_CDROM) 1855 cbe_lun->blocksize = 2048; 1856 else 1857 cbe_lun->blocksize = 512; 1858 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 1859 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 1860 0 : (be_lun->size_blocks - 1); 1861 1862 us = ps = vattr.va_blocksize; 1863 uo = po = 0; 1864 1865 value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); 1866 if (value != NULL) 1867 ctl_expand_number(value, &ps); 1868 value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); 1869 if (value != NULL) 1870 ctl_expand_number(value, &po); 1871 pss = ps / cbe_lun->blocksize; 1872 pos = po / cbe_lun->blocksize; 1873 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && 1874 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { 1875 cbe_lun->pblockexp = fls(pss) - 1; 1876 cbe_lun->pblockoff = (pss - pos) % pss; 1877 } 1878 1879 value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); 1880 if (value != NULL) 1881 ctl_expand_number(value, &us); 1882 value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); 1883 if (value != NULL) 1884 ctl_expand_number(value, &uo); 1885 uss = us / cbe_lun->blocksize; 1886 uos = uo / cbe_lun->blocksize; 1887 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && 1888 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { 1889 cbe_lun->ublockexp = fls(uss) - 1; 1890 cbe_lun->ublockoff = (uss - uos) % uss; 1891 } 1892 1893 /* 1894 * Sanity check. The media size has to be at least one 1895 * sector long. 1896 */ 1897 if (be_lun->size_bytes < cbe_lun->blocksize) { 1898 error = EINVAL; 1899 snprintf(req->error_str, sizeof(req->error_str), 1900 "file %s size %ju < block size %u", be_lun->dev_path, 1901 (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize); 1902 } 1903 1904 cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize; 1905 return (error); 1906 } 1907 1908 static int 1909 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1910 { 1911 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1912 struct ctl_lun_create_params *params; 1913 struct cdevsw *csw; 1914 struct cdev *dev; 1915 const char *value; 1916 int error, atomic, maxio, ref, unmap, tmp; 1917 off_t ps, pss, po, pos, us, uss, uo, uos, otmp; 1918 1919 params = &be_lun->params; 1920 1921 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1922 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1923 if (csw == NULL) 1924 return (ENXIO); 1925 if (strcmp(csw->d_name, "zvol") == 0) { 1926 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1927 be_lun->get_lba_status = ctl_be_block_gls_zvol; 1928 atomic = maxio = CTLBLK_MAX_IO_SIZE; 1929 } else { 1930 be_lun->dispatch = ctl_be_block_dispatch_dev; 1931 be_lun->get_lba_status = NULL; 1932 atomic = 0; 1933 maxio = dev->si_iosize_max; 1934 if (maxio <= 0) 1935 maxio = DFLTPHYS; 1936 if (maxio > CTLBLK_MAX_IO_SIZE) 1937 maxio = CTLBLK_MAX_IO_SIZE; 1938 } 1939 be_lun->lun_flush = ctl_be_block_flush_dev; 1940 be_lun->getattr = ctl_be_block_getattr_dev; 1941 be_lun->unmap = ctl_be_block_unmap_dev; 1942 1943 if (!csw->d_ioctl) { 1944 dev_relthread(dev, ref); 1945 snprintf(req->error_str, sizeof(req->error_str), 1946 "no d_ioctl for device %s!", be_lun->dev_path); 1947 return (ENODEV); 1948 } 1949 1950 error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD, 1951 curthread); 1952 if (error) { 1953 dev_relthread(dev, ref); 1954 snprintf(req->error_str, sizeof(req->error_str), 1955 "error %d returned for DIOCGSECTORSIZE ioctl " 1956 "on %s!", error, be_lun->dev_path); 1957 return (error); 1958 } 1959 1960 /* 1961 * If the user has asked for a blocksize that is greater than the 1962 * backing device's blocksize, we can do it only if the blocksize 1963 * the user is asking for is an even multiple of the underlying 1964 * device's blocksize. 1965 */ 1966 if ((params->blocksize_bytes != 0) && 1967 (params->blocksize_bytes >= tmp)) { 1968 if (params->blocksize_bytes % tmp == 0) { 1969 cbe_lun->blocksize = params->blocksize_bytes; 1970 } else { 1971 dev_relthread(dev, ref); 1972 snprintf(req->error_str, sizeof(req->error_str), 1973 "requested blocksize %u is not an even " 1974 "multiple of backing device blocksize %u", 1975 params->blocksize_bytes, tmp); 1976 return (EINVAL); 1977 } 1978 } else if (params->blocksize_bytes != 0) { 1979 dev_relthread(dev, ref); 1980 snprintf(req->error_str, sizeof(req->error_str), 1981 "requested blocksize %u < backing device " 1982 "blocksize %u", params->blocksize_bytes, tmp); 1983 return (EINVAL); 1984 } else if (cbe_lun->lun_type == T_CDROM) 1985 cbe_lun->blocksize = MAX(tmp, 2048); 1986 else 1987 cbe_lun->blocksize = tmp; 1988 1989 error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD, 1990 curthread); 1991 if (error) { 1992 dev_relthread(dev, ref); 1993 snprintf(req->error_str, sizeof(req->error_str), 1994 "error %d returned for DIOCGMEDIASIZE " 1995 " ioctl on %s!", error, 1996 be_lun->dev_path); 1997 return (error); 1998 } 1999 2000 if (params->lun_size_bytes != 0) { 2001 if (params->lun_size_bytes > otmp) { 2002 dev_relthread(dev, ref); 2003 snprintf(req->error_str, sizeof(req->error_str), 2004 "requested LUN size %ju > backing device " 2005 "size %ju", 2006 (uintmax_t)params->lun_size_bytes, 2007 (uintmax_t)otmp); 2008 return (EINVAL); 2009 } 2010 2011 be_lun->size_bytes = params->lun_size_bytes; 2012 } else 2013 be_lun->size_bytes = otmp; 2014 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 2015 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 2016 0 : (be_lun->size_blocks - 1); 2017 2018 error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD, 2019 curthread); 2020 if (error) 2021 ps = po = 0; 2022 else { 2023 error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po, 2024 FREAD, curthread); 2025 if (error) 2026 po = 0; 2027 } 2028 us = ps; 2029 uo = po; 2030 2031 value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL); 2032 if (value != NULL) 2033 ctl_expand_number(value, &ps); 2034 value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL); 2035 if (value != NULL) 2036 ctl_expand_number(value, &po); 2037 pss = ps / cbe_lun->blocksize; 2038 pos = po / cbe_lun->blocksize; 2039 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && 2040 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { 2041 cbe_lun->pblockexp = fls(pss) - 1; 2042 cbe_lun->pblockoff = (pss - pos) % pss; 2043 } 2044 2045 value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL); 2046 if (value != NULL) 2047 ctl_expand_number(value, &us); 2048 value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL); 2049 if (value != NULL) 2050 ctl_expand_number(value, &uo); 2051 uss = us / cbe_lun->blocksize; 2052 uos = uo / cbe_lun->blocksize; 2053 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && 2054 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { 2055 cbe_lun->ublockexp = fls(uss) - 1; 2056 cbe_lun->ublockoff = (uss - uos) % uss; 2057 } 2058 2059 cbe_lun->atomicblock = atomic / cbe_lun->blocksize; 2060 cbe_lun->opttxferlen = maxio / cbe_lun->blocksize; 2061 2062 if (be_lun->dispatch == ctl_be_block_dispatch_zvol) { 2063 unmap = 1; 2064 } else { 2065 struct diocgattr_arg arg; 2066 2067 strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name)); 2068 arg.len = sizeof(arg.value.i); 2069 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, 2070 curthread); 2071 unmap = (error == 0) ? arg.value.i : 0; 2072 } 2073 value = dnvlist_get_string(cbe_lun->options, "unmap", NULL); 2074 if (value != NULL) 2075 unmap = (strcmp(value, "on") == 0); 2076 if (unmap) 2077 cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; 2078 else 2079 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; 2080 2081 dev_relthread(dev, ref); 2082 return (0); 2083 } 2084 2085 static int 2086 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 2087 { 2088 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 2089 int flags; 2090 2091 if (be_lun->vn) { 2092 flags = FREAD; 2093 if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0) 2094 flags |= FWRITE; 2095 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 2096 be_lun->vn = NULL; 2097 2098 switch (be_lun->dev_type) { 2099 case CTL_BE_BLOCK_DEV: 2100 break; 2101 case CTL_BE_BLOCK_FILE: 2102 if (be_lun->backend.file.cred != NULL) { 2103 crfree(be_lun->backend.file.cred); 2104 be_lun->backend.file.cred = NULL; 2105 } 2106 break; 2107 case CTL_BE_BLOCK_NONE: 2108 break; 2109 default: 2110 panic("Unexpected backend type %d", be_lun->dev_type); 2111 break; 2112 } 2113 be_lun->dev_type = CTL_BE_BLOCK_NONE; 2114 } 2115 return (0); 2116 } 2117 2118 static int 2119 ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 2120 { 2121 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 2122 struct nameidata nd; 2123 const char *value; 2124 int error, flags; 2125 2126 error = 0; 2127 if (rootvnode == NULL) { 2128 snprintf(req->error_str, sizeof(req->error_str), 2129 "Root filesystem is not mounted"); 2130 return (1); 2131 } 2132 pwd_ensure_dirs(); 2133 2134 value = dnvlist_get_string(cbe_lun->options, "file", NULL); 2135 if (value == NULL) { 2136 snprintf(req->error_str, sizeof(req->error_str), 2137 "no file argument specified"); 2138 return (1); 2139 } 2140 free(be_lun->dev_path, M_CTLBLK); 2141 be_lun->dev_path = strdup(value, M_CTLBLK); 2142 2143 flags = FREAD; 2144 value = dnvlist_get_string(cbe_lun->options, "readonly", NULL); 2145 if (value != NULL) { 2146 if (strcmp(value, "on") != 0) 2147 flags |= FWRITE; 2148 } else if (cbe_lun->lun_type == T_DIRECT) 2149 flags |= FWRITE; 2150 2151 again: 2152 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 2153 error = vn_open(&nd, &flags, 0, NULL); 2154 if ((error == EROFS || error == EACCES) && (flags & FWRITE)) { 2155 flags &= ~FWRITE; 2156 goto again; 2157 } 2158 if (error) { 2159 /* 2160 * This is the only reasonable guess we can make as far as 2161 * path if the user doesn't give us a fully qualified path. 2162 * If they want to specify a file, they need to specify the 2163 * full path. 2164 */ 2165 if (be_lun->dev_path[0] != '/') { 2166 char *dev_name; 2167 2168 asprintf(&dev_name, M_CTLBLK, "/dev/%s", 2169 be_lun->dev_path); 2170 free(be_lun->dev_path, M_CTLBLK); 2171 be_lun->dev_path = dev_name; 2172 goto again; 2173 } 2174 snprintf(req->error_str, sizeof(req->error_str), 2175 "error opening %s: %d", be_lun->dev_path, error); 2176 return (error); 2177 } 2178 if (flags & FWRITE) 2179 cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY; 2180 else 2181 cbe_lun->flags |= CTL_LUN_FLAG_READONLY; 2182 2183 NDFREE(&nd, NDF_ONLY_PNBUF); 2184 be_lun->vn = nd.ni_vp; 2185 2186 /* We only support disks and files. */ 2187 if (vn_isdisk(be_lun->vn, &error)) { 2188 error = ctl_be_block_open_dev(be_lun, req); 2189 } else if (be_lun->vn->v_type == VREG) { 2190 error = ctl_be_block_open_file(be_lun, req); 2191 } else { 2192 error = EINVAL; 2193 snprintf(req->error_str, sizeof(req->error_str), 2194 "%s is not a disk or plain file", be_lun->dev_path); 2195 } 2196 VOP_UNLOCK(be_lun->vn); 2197 2198 if (error != 0) 2199 ctl_be_block_close(be_lun); 2200 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; 2201 if (be_lun->dispatch != ctl_be_block_dispatch_dev) 2202 cbe_lun->serseq = CTL_LUN_SERSEQ_READ; 2203 value = dnvlist_get_string(cbe_lun->options, "serseq", NULL); 2204 if (value != NULL && strcmp(value, "on") == 0) 2205 cbe_lun->serseq = CTL_LUN_SERSEQ_ON; 2206 else if (value != NULL && strcmp(value, "read") == 0) 2207 cbe_lun->serseq = CTL_LUN_SERSEQ_READ; 2208 else if (value != NULL && strcmp(value, "off") == 0) 2209 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; 2210 return (0); 2211 } 2212 2213 static int 2214 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2215 { 2216 struct ctl_be_lun *cbe_lun; 2217 struct ctl_be_block_lun *be_lun; 2218 struct ctl_lun_create_params *params; 2219 char num_thread_str[16]; 2220 char tmpstr[32]; 2221 const char *value; 2222 int retval, num_threads; 2223 int tmp_num_threads; 2224 2225 params = &req->reqdata.create; 2226 retval = 0; 2227 req->status = CTL_LUN_OK; 2228 2229 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 2230 cbe_lun = &be_lun->cbe_lun; 2231 be_lun->params = req->reqdata.create; 2232 be_lun->softc = softc; 2233 STAILQ_INIT(&be_lun->input_queue); 2234 STAILQ_INIT(&be_lun->config_read_queue); 2235 STAILQ_INIT(&be_lun->config_write_queue); 2236 STAILQ_INIT(&be_lun->datamove_queue); 2237 mtx_init(&be_lun->io_lock, "ctlblock io", NULL, MTX_DEF); 2238 mtx_init(&be_lun->queue_lock, "ctlblock queue", NULL, MTX_DEF); 2239 cbe_lun->options = nvlist_clone(req->args_nvl); 2240 2241 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 2242 cbe_lun->lun_type = params->device_type; 2243 else 2244 cbe_lun->lun_type = T_DIRECT; 2245 be_lun->flags = 0; 2246 cbe_lun->flags = 0; 2247 value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); 2248 if (value != NULL) { 2249 if (strcmp(value, "primary") == 0) 2250 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2251 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) 2252 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2253 2254 if (cbe_lun->lun_type == T_DIRECT || 2255 cbe_lun->lun_type == T_CDROM) { 2256 be_lun->size_bytes = params->lun_size_bytes; 2257 if (params->blocksize_bytes != 0) 2258 cbe_lun->blocksize = params->blocksize_bytes; 2259 else if (cbe_lun->lun_type == T_CDROM) 2260 cbe_lun->blocksize = 2048; 2261 else 2262 cbe_lun->blocksize = 512; 2263 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 2264 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 2265 0 : (be_lun->size_blocks - 1); 2266 2267 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || 2268 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { 2269 retval = ctl_be_block_open(be_lun, req); 2270 if (retval != 0) { 2271 retval = 0; 2272 req->status = CTL_LUN_WARNING; 2273 } 2274 } 2275 num_threads = cbb_num_threads; 2276 } else { 2277 num_threads = 1; 2278 } 2279 2280 value = dnvlist_get_string(cbe_lun->options, "num_threads", NULL); 2281 if (value != NULL) { 2282 tmp_num_threads = strtol(value, NULL, 0); 2283 2284 /* 2285 * We don't let the user specify less than one 2286 * thread, but hope he's clueful enough not to 2287 * specify 1000 threads. 2288 */ 2289 if (tmp_num_threads < 1) { 2290 snprintf(req->error_str, sizeof(req->error_str), 2291 "invalid number of threads %s", 2292 num_thread_str); 2293 goto bailout_error; 2294 } 2295 num_threads = tmp_num_threads; 2296 } 2297 2298 if (be_lun->vn == NULL) 2299 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2300 /* Tell the user the blocksize we ended up using */ 2301 params->lun_size_bytes = be_lun->size_bytes; 2302 params->blocksize_bytes = cbe_lun->blocksize; 2303 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2304 cbe_lun->req_lun_id = params->req_lun_id; 2305 cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; 2306 } else 2307 cbe_lun->req_lun_id = 0; 2308 2309 cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown; 2310 cbe_lun->be = &ctl_be_block_driver; 2311 2312 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2313 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", 2314 softc->num_luns); 2315 strncpy((char *)cbe_lun->serial_num, tmpstr, 2316 MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); 2317 2318 /* Tell the user what we used for a serial number */ 2319 strncpy((char *)params->serial_num, tmpstr, 2320 MIN(sizeof(params->serial_num), sizeof(tmpstr))); 2321 } else { 2322 strncpy((char *)cbe_lun->serial_num, params->serial_num, 2323 MIN(sizeof(cbe_lun->serial_num), 2324 sizeof(params->serial_num))); 2325 } 2326 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2327 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns); 2328 strncpy((char *)cbe_lun->device_id, tmpstr, 2329 MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); 2330 2331 /* Tell the user what we used for a device ID */ 2332 strncpy((char *)params->device_id, tmpstr, 2333 MIN(sizeof(params->device_id), sizeof(tmpstr))); 2334 } else { 2335 strncpy((char *)cbe_lun->device_id, params->device_id, 2336 MIN(sizeof(cbe_lun->device_id), 2337 sizeof(params->device_id))); 2338 } 2339 2340 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2341 2342 be_lun->io_taskqueue = taskqueue_create("ctlblocktq", M_WAITOK, 2343 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2344 2345 if (be_lun->io_taskqueue == NULL) { 2346 snprintf(req->error_str, sizeof(req->error_str), 2347 "unable to create taskqueue"); 2348 goto bailout_error; 2349 } 2350 2351 /* 2352 * Note that we start the same number of threads by default for 2353 * both the file case and the block device case. For the file 2354 * case, we need multiple threads to allow concurrency, because the 2355 * vnode interface is designed to be a blocking interface. For the 2356 * block device case, ZFS zvols at least will block the caller's 2357 * context in many instances, and so we need multiple threads to 2358 * overcome that problem. Other block devices don't need as many 2359 * threads, but they shouldn't cause too many problems. 2360 * 2361 * If the user wants to just have a single thread for a block 2362 * device, he can specify that when the LUN is created, or change 2363 * the tunable/sysctl to alter the default number of threads. 2364 */ 2365 retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue, 2366 /*num threads*/num_threads, 2367 /*priority*/PUSER, 2368 /*proc*/control_softc->ctl_proc, 2369 /*thread name*/"block"); 2370 2371 if (retval != 0) 2372 goto bailout_error; 2373 2374 be_lun->num_threads = num_threads; 2375 2376 retval = ctl_add_lun(&be_lun->cbe_lun); 2377 if (retval != 0) { 2378 snprintf(req->error_str, sizeof(req->error_str), 2379 "ctl_add_lun() returned error %d, see dmesg for " 2380 "details", retval); 2381 retval = 0; 2382 goto bailout_error; 2383 } 2384 2385 be_lun->disk_stats = devstat_new_entry("cbb", cbe_lun->lun_id, 2386 cbe_lun->blocksize, 2387 DEVSTAT_ALL_SUPPORTED, 2388 cbe_lun->lun_type 2389 | DEVSTAT_TYPE_IF_OTHER, 2390 DEVSTAT_PRIORITY_OTHER); 2391 2392 mtx_lock(&softc->lock); 2393 softc->num_luns++; 2394 SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links); 2395 mtx_unlock(&softc->lock); 2396 2397 params->req_lun_id = cbe_lun->lun_id; 2398 2399 return (retval); 2400 2401 bailout_error: 2402 req->status = CTL_LUN_ERROR; 2403 2404 if (be_lun->io_taskqueue != NULL) 2405 taskqueue_free(be_lun->io_taskqueue); 2406 ctl_be_block_close(be_lun); 2407 if (be_lun->dev_path != NULL) 2408 free(be_lun->dev_path, M_CTLBLK); 2409 nvlist_destroy(cbe_lun->options); 2410 mtx_destroy(&be_lun->queue_lock); 2411 mtx_destroy(&be_lun->io_lock); 2412 free(be_lun, M_CTLBLK); 2413 2414 return (retval); 2415 } 2416 2417 static int 2418 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2419 { 2420 struct ctl_lun_rm_params *params; 2421 struct ctl_be_block_lun *be_lun; 2422 struct ctl_be_lun *cbe_lun; 2423 int retval; 2424 2425 params = &req->reqdata.rm; 2426 2427 sx_xlock(&softc->modify_lock); 2428 mtx_lock(&softc->lock); 2429 SLIST_FOREACH(be_lun, &softc->lun_list, links) { 2430 if (be_lun->cbe_lun.lun_id == params->lun_id) { 2431 SLIST_REMOVE(&softc->lun_list, be_lun, 2432 ctl_be_block_lun, links); 2433 softc->num_luns--; 2434 break; 2435 } 2436 } 2437 mtx_unlock(&softc->lock); 2438 sx_xunlock(&softc->modify_lock); 2439 if (be_lun == NULL) { 2440 snprintf(req->error_str, sizeof(req->error_str), 2441 "LUN %u is not managed by the block backend", 2442 params->lun_id); 2443 goto bailout_error; 2444 } 2445 cbe_lun = &be_lun->cbe_lun; 2446 2447 if (be_lun->vn != NULL) { 2448 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2449 ctl_lun_no_media(cbe_lun); 2450 taskqueue_drain_all(be_lun->io_taskqueue); 2451 ctl_be_block_close(be_lun); 2452 } 2453 2454 mtx_lock(&softc->lock); 2455 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2456 mtx_unlock(&softc->lock); 2457 2458 retval = ctl_remove_lun(cbe_lun); 2459 if (retval != 0) { 2460 snprintf(req->error_str, sizeof(req->error_str), 2461 "error %d returned from ctl_remove_lun() for " 2462 "LUN %d", retval, params->lun_id); 2463 mtx_lock(&softc->lock); 2464 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2465 mtx_unlock(&softc->lock); 2466 goto bailout_error; 2467 } 2468 2469 mtx_lock(&softc->lock); 2470 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2471 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblockrm", 0); 2472 if (retval == EINTR) 2473 break; 2474 } 2475 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2476 if (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2477 mtx_unlock(&softc->lock); 2478 free(be_lun, M_CTLBLK); 2479 } else { 2480 mtx_unlock(&softc->lock); 2481 return (EINTR); 2482 } 2483 2484 req->status = CTL_LUN_OK; 2485 return (0); 2486 2487 bailout_error: 2488 req->status = CTL_LUN_ERROR; 2489 return (0); 2490 } 2491 2492 static int 2493 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2494 { 2495 struct ctl_lun_modify_params *params; 2496 struct ctl_be_block_lun *be_lun; 2497 struct ctl_be_lun *cbe_lun; 2498 const char *value; 2499 uint64_t oldsize; 2500 int error, wasprim; 2501 2502 params = &req->reqdata.modify; 2503 2504 sx_xlock(&softc->modify_lock); 2505 mtx_lock(&softc->lock); 2506 SLIST_FOREACH(be_lun, &softc->lun_list, links) { 2507 if (be_lun->cbe_lun.lun_id == params->lun_id) 2508 break; 2509 } 2510 mtx_unlock(&softc->lock); 2511 if (be_lun == NULL) { 2512 snprintf(req->error_str, sizeof(req->error_str), 2513 "LUN %u is not managed by the block backend", 2514 params->lun_id); 2515 goto bailout_error; 2516 } 2517 cbe_lun = &be_lun->cbe_lun; 2518 2519 if (params->lun_size_bytes != 0) 2520 be_lun->params.lun_size_bytes = params->lun_size_bytes; 2521 2522 if (req->args_nvl != NULL) { 2523 nvlist_destroy(cbe_lun->options); 2524 cbe_lun->options = nvlist_clone(req->args_nvl); 2525 } 2526 2527 wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); 2528 value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL); 2529 if (value != NULL) { 2530 if (strcmp(value, "primary") == 0) 2531 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2532 else 2533 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; 2534 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) 2535 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2536 else 2537 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; 2538 if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { 2539 if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) 2540 ctl_lun_primary(cbe_lun); 2541 else 2542 ctl_lun_secondary(cbe_lun); 2543 } 2544 2545 oldsize = be_lun->size_blocks; 2546 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || 2547 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { 2548 if (be_lun->vn == NULL) 2549 error = ctl_be_block_open(be_lun, req); 2550 else if (vn_isdisk(be_lun->vn, &error)) 2551 error = ctl_be_block_open_dev(be_lun, req); 2552 else if (be_lun->vn->v_type == VREG) { 2553 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2554 error = ctl_be_block_open_file(be_lun, req); 2555 VOP_UNLOCK(be_lun->vn); 2556 } else 2557 error = EINVAL; 2558 if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) && 2559 be_lun->vn != NULL) { 2560 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; 2561 ctl_lun_has_media(cbe_lun); 2562 } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 && 2563 be_lun->vn == NULL) { 2564 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2565 ctl_lun_no_media(cbe_lun); 2566 } 2567 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; 2568 } else { 2569 if (be_lun->vn != NULL) { 2570 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2571 ctl_lun_no_media(cbe_lun); 2572 taskqueue_drain_all(be_lun->io_taskqueue); 2573 error = ctl_be_block_close(be_lun); 2574 } else 2575 error = 0; 2576 } 2577 if (be_lun->size_blocks != oldsize) 2578 ctl_lun_capacity_changed(cbe_lun); 2579 2580 /* Tell the user the exact size we ended up using */ 2581 params->lun_size_bytes = be_lun->size_bytes; 2582 2583 sx_xunlock(&softc->modify_lock); 2584 req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; 2585 return (0); 2586 2587 bailout_error: 2588 sx_xunlock(&softc->modify_lock); 2589 req->status = CTL_LUN_ERROR; 2590 return (0); 2591 } 2592 2593 static void 2594 ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun) 2595 { 2596 struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)cbe_lun; 2597 struct ctl_be_block_softc *softc = be_lun->softc; 2598 2599 taskqueue_drain_all(be_lun->io_taskqueue); 2600 taskqueue_free(be_lun->io_taskqueue); 2601 if (be_lun->disk_stats != NULL) 2602 devstat_remove_entry(be_lun->disk_stats); 2603 nvlist_destroy(be_lun->cbe_lun.options); 2604 free(be_lun->dev_path, M_CTLBLK); 2605 mtx_destroy(&be_lun->queue_lock); 2606 mtx_destroy(&be_lun->io_lock); 2607 2608 mtx_lock(&softc->lock); 2609 be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2610 if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2611 wakeup(be_lun); 2612 else 2613 free(be_lun, M_CTLBLK); 2614 mtx_unlock(&softc->lock); 2615 } 2616 2617 static int 2618 ctl_be_block_config_write(union ctl_io *io) 2619 { 2620 struct ctl_be_block_lun *be_lun; 2621 struct ctl_be_lun *cbe_lun; 2622 int retval; 2623 2624 DPRINTF("entered\n"); 2625 2626 cbe_lun = CTL_BACKEND_LUN(io); 2627 be_lun = (struct ctl_be_block_lun *)cbe_lun; 2628 2629 retval = 0; 2630 switch (io->scsiio.cdb[0]) { 2631 case SYNCHRONIZE_CACHE: 2632 case SYNCHRONIZE_CACHE_16: 2633 case WRITE_SAME_10: 2634 case WRITE_SAME_16: 2635 case UNMAP: 2636 /* 2637 * The upper level CTL code will filter out any CDBs with 2638 * the immediate bit set and return the proper error. 2639 * 2640 * We don't really need to worry about what LBA range the 2641 * user asked to be synced out. When they issue a sync 2642 * cache command, we'll sync out the whole thing. 2643 */ 2644 mtx_lock(&be_lun->queue_lock); 2645 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2646 links); 2647 mtx_unlock(&be_lun->queue_lock); 2648 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2649 break; 2650 case START_STOP_UNIT: { 2651 struct scsi_start_stop_unit *cdb; 2652 struct ctl_lun_req req; 2653 2654 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2655 if ((cdb->how & SSS_PC_MASK) != 0) { 2656 ctl_set_success(&io->scsiio); 2657 ctl_config_write_done(io); 2658 break; 2659 } 2660 if (cdb->how & SSS_START) { 2661 if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) { 2662 retval = ctl_be_block_open(be_lun, &req); 2663 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; 2664 if (retval == 0) { 2665 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; 2666 ctl_lun_has_media(cbe_lun); 2667 } else { 2668 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2669 ctl_lun_no_media(cbe_lun); 2670 } 2671 } 2672 ctl_start_lun(cbe_lun); 2673 } else { 2674 ctl_stop_lun(cbe_lun); 2675 if (cdb->how & SSS_LOEJ) { 2676 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2677 cbe_lun->flags |= CTL_LUN_FLAG_EJECTED; 2678 ctl_lun_ejected(cbe_lun); 2679 if (be_lun->vn != NULL) 2680 ctl_be_block_close(be_lun); 2681 } 2682 } 2683 2684 ctl_set_success(&io->scsiio); 2685 ctl_config_write_done(io); 2686 break; 2687 } 2688 case PREVENT_ALLOW: 2689 ctl_set_success(&io->scsiio); 2690 ctl_config_write_done(io); 2691 break; 2692 default: 2693 ctl_set_invalid_opcode(&io->scsiio); 2694 ctl_config_write_done(io); 2695 retval = CTL_RETVAL_COMPLETE; 2696 break; 2697 } 2698 2699 return (retval); 2700 } 2701 2702 static int 2703 ctl_be_block_config_read(union ctl_io *io) 2704 { 2705 struct ctl_be_block_lun *be_lun; 2706 int retval = 0; 2707 2708 DPRINTF("entered\n"); 2709 2710 be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io); 2711 2712 switch (io->scsiio.cdb[0]) { 2713 case SERVICE_ACTION_IN: 2714 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { 2715 mtx_lock(&be_lun->queue_lock); 2716 STAILQ_INSERT_TAIL(&be_lun->config_read_queue, 2717 &io->io_hdr, links); 2718 mtx_unlock(&be_lun->queue_lock); 2719 taskqueue_enqueue(be_lun->io_taskqueue, 2720 &be_lun->io_task); 2721 retval = CTL_RETVAL_QUEUED; 2722 break; 2723 } 2724 ctl_set_invalid_field(&io->scsiio, 2725 /*sks_valid*/ 1, 2726 /*command*/ 1, 2727 /*field*/ 1, 2728 /*bit_valid*/ 1, 2729 /*bit*/ 4); 2730 ctl_config_read_done(io); 2731 retval = CTL_RETVAL_COMPLETE; 2732 break; 2733 default: 2734 ctl_set_invalid_opcode(&io->scsiio); 2735 ctl_config_read_done(io); 2736 retval = CTL_RETVAL_COMPLETE; 2737 break; 2738 } 2739 2740 return (retval); 2741 } 2742 2743 static int 2744 ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb) 2745 { 2746 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; 2747 int retval; 2748 2749 retval = sbuf_printf(sb, "\t<num_threads>"); 2750 if (retval != 0) 2751 goto bailout; 2752 retval = sbuf_printf(sb, "%d", lun->num_threads); 2753 if (retval != 0) 2754 goto bailout; 2755 retval = sbuf_printf(sb, "</num_threads>\n"); 2756 2757 bailout: 2758 return (retval); 2759 } 2760 2761 static uint64_t 2762 ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname) 2763 { 2764 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun; 2765 2766 if (lun->getattr == NULL) 2767 return (UINT64_MAX); 2768 return (lun->getattr(lun, attrname)); 2769 } 2770 2771 static int 2772 ctl_be_block_init(void) 2773 { 2774 struct ctl_be_block_softc *softc = &backend_block_softc; 2775 2776 sx_init(&softc->modify_lock, "ctlblock modify"); 2777 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2778 softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2779 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2780 softc->buf_zone = uma_zcreate("ctlblock", CTLBLK_MAX_SEG, 2781 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 2782 SLIST_INIT(&softc->lun_list); 2783 return (0); 2784 } 2785 2786 2787 static int 2788 ctl_be_block_shutdown(void) 2789 { 2790 struct ctl_be_block_softc *softc = &backend_block_softc; 2791 struct ctl_be_block_lun *lun; 2792 2793 mtx_lock(&softc->lock); 2794 while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) { 2795 SLIST_REMOVE_HEAD(&softc->lun_list, links); 2796 softc->num_luns--; 2797 /* 2798 * Drop our lock here. Since ctl_remove_lun() can call 2799 * back into us, this could potentially lead to a recursive 2800 * lock of the same mutex, which would cause a hang. 2801 */ 2802 mtx_unlock(&softc->lock); 2803 ctl_remove_lun(&lun->cbe_lun); 2804 mtx_lock(&softc->lock); 2805 } 2806 mtx_unlock(&softc->lock); 2807 uma_zdestroy(softc->buf_zone); 2808 uma_zdestroy(softc->beio_zone); 2809 mtx_destroy(&softc->lock); 2810 sx_destroy(&softc->modify_lock); 2811 return (0); 2812 } 2813