1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org> 6 * All rights reserved. 7 * 8 * Portions of this software were developed by Edward Tomasz Napierala 9 * under sponsorship from the FreeBSD Foundation. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions, and the following disclaimer, 16 * without modification. 17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 18 * substantially similar to the "NO WARRANTY" disclaimer below 19 * ("Disclaimer") and any redistribution must be conditioned upon 20 * including a substantially similar Disclaimer requirement for further 21 * binary redistribution. 22 * 23 * NO WARRANTY 24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 25 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 27 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 28 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 32 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 33 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGES. 35 * 36 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 37 */ 38 /* 39 * CAM Target Layer driver backend for block devices. 40 * 41 * Author: Ken Merry <ken@FreeBSD.org> 42 */ 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/kernel.h> 49 #include <sys/types.h> 50 #include <sys/kthread.h> 51 #include <sys/bio.h> 52 #include <sys/fcntl.h> 53 #include <sys/limits.h> 54 #include <sys/lock.h> 55 #include <sys/mutex.h> 56 #include <sys/condvar.h> 57 #include <sys/malloc.h> 58 #include <sys/conf.h> 59 #include <sys/ioccom.h> 60 #include <sys/queue.h> 61 #include <sys/sbuf.h> 62 #include <sys/endian.h> 63 #include <sys/uio.h> 64 #include <sys/buf.h> 65 #include <sys/taskqueue.h> 66 #include <sys/vnode.h> 67 #include <sys/namei.h> 68 #include <sys/mount.h> 69 #include <sys/disk.h> 70 #include <sys/fcntl.h> 71 #include <sys/filedesc.h> 72 #include <sys/filio.h> 73 #include <sys/proc.h> 74 #include <sys/pcpu.h> 75 #include <sys/module.h> 76 #include <sys/sdt.h> 77 #include <sys/devicestat.h> 78 #include <sys/sysctl.h> 79 80 #include <geom/geom.h> 81 82 #include <cam/cam.h> 83 #include <cam/scsi/scsi_all.h> 84 #include <cam/scsi/scsi_da.h> 85 #include <cam/ctl/ctl_io.h> 86 #include <cam/ctl/ctl.h> 87 #include <cam/ctl/ctl_backend.h> 88 #include <cam/ctl/ctl_ioctl.h> 89 #include <cam/ctl/ctl_ha.h> 90 #include <cam/ctl/ctl_scsi_all.h> 91 #include <cam/ctl/ctl_private.h> 92 #include <cam/ctl/ctl_error.h> 93 94 /* 95 * The idea here is that we'll allocate enough S/G space to hold a 1MB 96 * I/O. If we get an I/O larger than that, we'll split it. 97 */ 98 #define CTLBLK_HALF_IO_SIZE (512 * 1024) 99 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 100 #define CTLBLK_MAX_SEG MAXPHYS 101 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 102 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 103 104 #ifdef CTLBLK_DEBUG 105 #define DPRINTF(fmt, args...) \ 106 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 107 #else 108 #define DPRINTF(fmt, args...) do {} while(0) 109 #endif 110 111 #define PRIV(io) \ 112 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 113 #define ARGS(io) \ 114 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 115 116 SDT_PROVIDER_DEFINE(cbb); 117 118 typedef enum { 119 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 120 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 121 CTL_BE_BLOCK_LUN_WAITING = 0x04, 122 } ctl_be_block_lun_flags; 123 124 typedef enum { 125 CTL_BE_BLOCK_NONE, 126 CTL_BE_BLOCK_DEV, 127 CTL_BE_BLOCK_FILE 128 } ctl_be_block_type; 129 130 struct ctl_be_block_filedata { 131 struct ucred *cred; 132 }; 133 134 union ctl_be_block_bedata { 135 struct ctl_be_block_filedata file; 136 }; 137 138 struct ctl_be_block_io; 139 struct ctl_be_block_lun; 140 141 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 142 struct ctl_be_block_io *beio); 143 typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun, 144 const char *attrname); 145 146 /* 147 * Backend LUN structure. There is a 1:1 mapping between a block device 148 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 149 */ 150 struct ctl_be_block_lun { 151 struct ctl_lun_create_params params; 152 char lunname[32]; 153 char *dev_path; 154 ctl_be_block_type dev_type; 155 struct vnode *vn; 156 union ctl_be_block_bedata backend; 157 cbb_dispatch_t dispatch; 158 cbb_dispatch_t lun_flush; 159 cbb_dispatch_t unmap; 160 cbb_dispatch_t get_lba_status; 161 cbb_getattr_t getattr; 162 uma_zone_t lun_zone; 163 uint64_t size_blocks; 164 uint64_t size_bytes; 165 struct ctl_be_block_softc *softc; 166 struct devstat *disk_stats; 167 ctl_be_block_lun_flags flags; 168 STAILQ_ENTRY(ctl_be_block_lun) links; 169 struct ctl_be_lun cbe_lun; 170 struct taskqueue *io_taskqueue; 171 struct task io_task; 172 int num_threads; 173 STAILQ_HEAD(, ctl_io_hdr) input_queue; 174 STAILQ_HEAD(, ctl_io_hdr) config_read_queue; 175 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 176 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 177 struct mtx_padalign io_lock; 178 struct mtx_padalign queue_lock; 179 }; 180 181 /* 182 * Overall softc structure for the block backend module. 183 */ 184 struct ctl_be_block_softc { 185 struct mtx lock; 186 uma_zone_t beio_zone; 187 int num_luns; 188 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 189 }; 190 191 static struct ctl_be_block_softc backend_block_softc; 192 193 /* 194 * Per-I/O information. 195 */ 196 struct ctl_be_block_io { 197 union ctl_io *io; 198 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 199 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 200 int bio_cmd; 201 int num_segs; 202 int num_bios_sent; 203 int num_bios_done; 204 int send_complete; 205 int first_error; 206 uint64_t first_error_offset; 207 struct bintime ds_t0; 208 devstat_tag_type ds_tag_type; 209 devstat_trans_flags ds_trans_type; 210 uint64_t io_len; 211 uint64_t io_offset; 212 int io_arg; 213 struct ctl_be_block_softc *softc; 214 struct ctl_be_block_lun *lun; 215 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 216 }; 217 218 extern struct ctl_softc *control_softc; 219 220 static int cbb_num_threads = 14; 221 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 222 "CAM Target Layer Block Backend"); 223 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, 224 &cbb_num_threads, 0, "Number of threads per backing file"); 225 226 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 227 static void ctl_free_beio(struct ctl_be_block_io *beio); 228 static void ctl_complete_beio(struct ctl_be_block_io *beio); 229 static int ctl_be_block_move_done(union ctl_io *io); 230 static void ctl_be_block_biodone(struct bio *bio); 231 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 232 struct ctl_be_block_io *beio); 233 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 234 struct ctl_be_block_io *beio); 235 static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, 236 struct ctl_be_block_io *beio); 237 static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, 238 const char *attrname); 239 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 244 struct ctl_be_block_io *beio); 245 static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, 246 const char *attrname); 247 static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, 248 union ctl_io *io); 249 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 250 union ctl_io *io); 251 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 252 union ctl_io *io); 253 static void ctl_be_block_worker(void *context, int pending); 254 static int ctl_be_block_submit(union ctl_io *io); 255 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 256 int flag, struct thread *td); 257 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 258 struct ctl_lun_req *req); 259 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 260 struct ctl_lun_req *req); 261 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 262 static int ctl_be_block_open(struct ctl_be_block_lun *be_lun, 263 struct ctl_lun_req *req); 264 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 265 struct ctl_lun_req *req); 266 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 267 struct ctl_lun_req *req); 268 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 269 struct ctl_lun_req *req); 270 static void ctl_be_block_lun_shutdown(void *be_lun); 271 static void ctl_be_block_lun_config_status(void *be_lun, 272 ctl_lun_config_status status); 273 static int ctl_be_block_config_write(union ctl_io *io); 274 static int ctl_be_block_config_read(union ctl_io *io); 275 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 276 static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname); 277 static int ctl_be_block_init(void); 278 static int ctl_be_block_shutdown(void); 279 280 static struct ctl_backend_driver ctl_be_block_driver = 281 { 282 .name = "block", 283 .flags = CTL_BE_FLAG_HAS_CONFIG, 284 .init = ctl_be_block_init, 285 .shutdown = ctl_be_block_shutdown, 286 .data_submit = ctl_be_block_submit, 287 .data_move_done = ctl_be_block_move_done, 288 .config_read = ctl_be_block_config_read, 289 .config_write = ctl_be_block_config_write, 290 .ioctl = ctl_be_block_ioctl, 291 .lun_info = ctl_be_block_lun_info, 292 .lun_attr = ctl_be_block_lun_attr 293 }; 294 295 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 296 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 297 298 static struct ctl_be_block_io * 299 ctl_alloc_beio(struct ctl_be_block_softc *softc) 300 { 301 struct ctl_be_block_io *beio; 302 303 beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO); 304 beio->softc = softc; 305 return (beio); 306 } 307 308 static void 309 ctl_free_beio(struct ctl_be_block_io *beio) 310 { 311 int duplicate_free; 312 int i; 313 314 duplicate_free = 0; 315 316 for (i = 0; i < beio->num_segs; i++) { 317 if (beio->sg_segs[i].addr == NULL) 318 duplicate_free++; 319 320 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 321 beio->sg_segs[i].addr = NULL; 322 323 /* For compare we had two equal S/G lists. */ 324 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 325 uma_zfree(beio->lun->lun_zone, 326 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 327 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 328 } 329 } 330 331 if (duplicate_free > 0) { 332 printf("%s: %d duplicate frees out of %d segments\n", __func__, 333 duplicate_free, beio->num_segs); 334 } 335 336 uma_zfree(beio->softc->beio_zone, beio); 337 } 338 339 static void 340 ctl_complete_beio(struct ctl_be_block_io *beio) 341 { 342 union ctl_io *io = beio->io; 343 344 if (beio->beio_cont != NULL) { 345 beio->beio_cont(beio); 346 } else { 347 ctl_free_beio(beio); 348 ctl_data_submit_done(io); 349 } 350 } 351 352 static size_t 353 cmp(uint8_t *a, uint8_t *b, size_t size) 354 { 355 size_t i; 356 357 for (i = 0; i < size; i++) { 358 if (a[i] != b[i]) 359 break; 360 } 361 return (i); 362 } 363 364 static void 365 ctl_be_block_compare(union ctl_io *io) 366 { 367 struct ctl_be_block_io *beio; 368 uint64_t off, res; 369 int i; 370 uint8_t info[8]; 371 372 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 373 off = 0; 374 for (i = 0; i < beio->num_segs; i++) { 375 res = cmp(beio->sg_segs[i].addr, 376 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 377 beio->sg_segs[i].len); 378 off += res; 379 if (res < beio->sg_segs[i].len) 380 break; 381 } 382 if (i < beio->num_segs) { 383 scsi_u64to8b(off, info); 384 ctl_set_sense(&io->scsiio, /*current_error*/ 1, 385 /*sense_key*/ SSD_KEY_MISCOMPARE, 386 /*asc*/ 0x1D, /*ascq*/ 0x00, 387 /*type*/ SSD_ELEM_INFO, 388 /*size*/ sizeof(info), /*data*/ &info, 389 /*type*/ SSD_ELEM_NONE); 390 } else 391 ctl_set_success(&io->scsiio); 392 } 393 394 static int 395 ctl_be_block_move_done(union ctl_io *io) 396 { 397 struct ctl_be_block_io *beio; 398 struct ctl_be_block_lun *be_lun; 399 struct ctl_lba_len_flags *lbalen; 400 #ifdef CTL_TIME_IO 401 struct bintime cur_bt; 402 #endif 403 404 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 405 be_lun = beio->lun; 406 407 DPRINTF("entered\n"); 408 409 #ifdef CTL_TIME_IO 410 getbinuptime(&cur_bt); 411 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 412 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 413 #endif 414 io->io_hdr.num_dmas++; 415 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 416 417 /* 418 * We set status at this point for read commands, and write 419 * commands with errors. 420 */ 421 if (io->io_hdr.flags & CTL_FLAG_ABORT) { 422 ; 423 } else if ((io->io_hdr.port_status != 0) && 424 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 425 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 426 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1, 427 /*retry_count*/ io->io_hdr.port_status); 428 } else if (io->scsiio.kern_data_resid != 0 && 429 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT && 430 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 431 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 432 ctl_set_invalid_field_ciu(&io->scsiio); 433 } else if ((io->io_hdr.port_status == 0) && 434 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 435 lbalen = ARGS(beio->io); 436 if (lbalen->flags & CTL_LLF_READ) { 437 ctl_set_success(&io->scsiio); 438 } else if (lbalen->flags & CTL_LLF_COMPARE) { 439 /* We have two data blocks ready for comparison. */ 440 ctl_be_block_compare(io); 441 } 442 } 443 444 /* 445 * If this is a read, or a write with errors, it is done. 446 */ 447 if ((beio->bio_cmd == BIO_READ) 448 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 449 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 450 ctl_complete_beio(beio); 451 return (0); 452 } 453 454 /* 455 * At this point, we have a write and the DMA completed 456 * successfully. We now have to queue it to the task queue to 457 * execute the backend I/O. That is because we do blocking 458 * memory allocations, and in the file backing case, blocking I/O. 459 * This move done routine is generally called in the SIM's 460 * interrupt context, and therefore we cannot block. 461 */ 462 mtx_lock(&be_lun->queue_lock); 463 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 464 mtx_unlock(&be_lun->queue_lock); 465 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 466 467 return (0); 468 } 469 470 static void 471 ctl_be_block_biodone(struct bio *bio) 472 { 473 struct ctl_be_block_io *beio; 474 struct ctl_be_block_lun *be_lun; 475 union ctl_io *io; 476 int error; 477 478 beio = bio->bio_caller1; 479 be_lun = beio->lun; 480 io = beio->io; 481 482 DPRINTF("entered\n"); 483 484 error = bio->bio_error; 485 mtx_lock(&be_lun->io_lock); 486 if (error != 0 && 487 (beio->first_error == 0 || 488 bio->bio_offset < beio->first_error_offset)) { 489 beio->first_error = error; 490 beio->first_error_offset = bio->bio_offset; 491 } 492 493 beio->num_bios_done++; 494 495 /* 496 * XXX KDM will this cause WITNESS to complain? Holding a lock 497 * during the free might cause it to complain. 498 */ 499 g_destroy_bio(bio); 500 501 /* 502 * If the send complete bit isn't set, or we aren't the last I/O to 503 * complete, then we're done. 504 */ 505 if ((beio->send_complete == 0) 506 || (beio->num_bios_done < beio->num_bios_sent)) { 507 mtx_unlock(&be_lun->io_lock); 508 return; 509 } 510 511 /* 512 * At this point, we've verified that we are the last I/O to 513 * complete, so it's safe to drop the lock. 514 */ 515 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 516 beio->ds_tag_type, beio->ds_trans_type, 517 /*now*/ NULL, /*then*/&beio->ds_t0); 518 mtx_unlock(&be_lun->io_lock); 519 520 /* 521 * If there are any errors from the backing device, we fail the 522 * entire I/O with a medium error. 523 */ 524 error = beio->first_error; 525 if (error != 0) { 526 if (error == EOPNOTSUPP) { 527 ctl_set_invalid_opcode(&io->scsiio); 528 } else if (error == ENOSPC || error == EDQUOT) { 529 ctl_set_space_alloc_fail(&io->scsiio); 530 } else if (error == EROFS || error == EACCES) { 531 ctl_set_hw_write_protected(&io->scsiio); 532 } else if (beio->bio_cmd == BIO_FLUSH) { 533 /* XXX KDM is there is a better error here? */ 534 ctl_set_internal_failure(&io->scsiio, 535 /*sks_valid*/ 1, 536 /*retry_count*/ 0xbad2); 537 } else { 538 ctl_set_medium_error(&io->scsiio, 539 beio->bio_cmd == BIO_READ); 540 } 541 ctl_complete_beio(beio); 542 return; 543 } 544 545 /* 546 * If this is a write, a flush, a delete or verify, we're all done. 547 * If this is a read, we can now send the data to the user. 548 */ 549 if ((beio->bio_cmd == BIO_WRITE) 550 || (beio->bio_cmd == BIO_FLUSH) 551 || (beio->bio_cmd == BIO_DELETE) 552 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 553 ctl_set_success(&io->scsiio); 554 ctl_complete_beio(beio); 555 } else { 556 if ((ARGS(io)->flags & CTL_LLF_READ) && 557 beio->beio_cont == NULL) { 558 ctl_set_success(&io->scsiio); 559 ctl_serseq_done(io); 560 } 561 #ifdef CTL_TIME_IO 562 getbinuptime(&io->io_hdr.dma_start_bt); 563 #endif 564 ctl_datamove(io); 565 } 566 } 567 568 static void 569 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 570 struct ctl_be_block_io *beio) 571 { 572 union ctl_io *io = beio->io; 573 struct mount *mountpoint; 574 int error, lock_flags; 575 576 DPRINTF("entered\n"); 577 578 binuptime(&beio->ds_t0); 579 mtx_lock(&be_lun->io_lock); 580 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 581 mtx_unlock(&be_lun->io_lock); 582 583 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 584 585 if (MNT_SHARED_WRITES(mountpoint) || 586 ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 587 lock_flags = LK_SHARED; 588 else 589 lock_flags = LK_EXCLUSIVE; 590 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 591 error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT, 592 curthread); 593 VOP_UNLOCK(be_lun->vn, 0); 594 595 vn_finished_write(mountpoint); 596 597 mtx_lock(&be_lun->io_lock); 598 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 599 beio->ds_tag_type, beio->ds_trans_type, 600 /*now*/ NULL, /*then*/&beio->ds_t0); 601 mtx_unlock(&be_lun->io_lock); 602 603 if (error == 0) 604 ctl_set_success(&io->scsiio); 605 else { 606 /* XXX KDM is there is a better error here? */ 607 ctl_set_internal_failure(&io->scsiio, 608 /*sks_valid*/ 1, 609 /*retry_count*/ 0xbad1); 610 } 611 612 ctl_complete_beio(beio); 613 } 614 615 SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t"); 616 SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t"); 617 SDT_PROBE_DEFINE1(cbb, , read, file_done,"uint64_t"); 618 SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t"); 619 620 static void 621 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 622 struct ctl_be_block_io *beio) 623 { 624 struct ctl_be_block_filedata *file_data; 625 union ctl_io *io; 626 struct uio xuio; 627 struct iovec *xiovec; 628 size_t s; 629 int error, flags, i; 630 631 DPRINTF("entered\n"); 632 633 file_data = &be_lun->backend.file; 634 io = beio->io; 635 flags = 0; 636 if (ARGS(io)->flags & CTL_LLF_DPO) 637 flags |= IO_DIRECT; 638 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 639 flags |= IO_SYNC; 640 641 bzero(&xuio, sizeof(xuio)); 642 if (beio->bio_cmd == BIO_READ) { 643 SDT_PROBE0(cbb, , read, file_start); 644 xuio.uio_rw = UIO_READ; 645 } else { 646 SDT_PROBE0(cbb, , write, file_start); 647 xuio.uio_rw = UIO_WRITE; 648 } 649 xuio.uio_offset = beio->io_offset; 650 xuio.uio_resid = beio->io_len; 651 xuio.uio_segflg = UIO_SYSSPACE; 652 xuio.uio_iov = beio->xiovecs; 653 xuio.uio_iovcnt = beio->num_segs; 654 xuio.uio_td = curthread; 655 656 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 657 xiovec->iov_base = beio->sg_segs[i].addr; 658 xiovec->iov_len = beio->sg_segs[i].len; 659 } 660 661 binuptime(&beio->ds_t0); 662 mtx_lock(&be_lun->io_lock); 663 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 664 mtx_unlock(&be_lun->io_lock); 665 666 if (beio->bio_cmd == BIO_READ) { 667 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 668 669 /* 670 * UFS pays attention to IO_DIRECT for reads. If the 671 * DIRECTIO option is configured into the kernel, it calls 672 * ffs_rawread(). But that only works for single-segment 673 * uios with user space addresses. In our case, with a 674 * kernel uio, it still reads into the buffer cache, but it 675 * will just try to release the buffer from the cache later 676 * on in ffs_read(). 677 * 678 * ZFS does not pay attention to IO_DIRECT for reads. 679 * 680 * UFS does not pay attention to IO_SYNC for reads. 681 * 682 * ZFS pays attention to IO_SYNC (which translates into the 683 * Solaris define FRSYNC for zfs_read()) for reads. It 684 * attempts to sync the file before reading. 685 */ 686 error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); 687 688 VOP_UNLOCK(be_lun->vn, 0); 689 SDT_PROBE0(cbb, , read, file_done); 690 if (error == 0 && xuio.uio_resid > 0) { 691 /* 692 * If we red less then requested (EOF), then 693 * we should clean the rest of the buffer. 694 */ 695 s = beio->io_len - xuio.uio_resid; 696 for (i = 0; i < beio->num_segs; i++) { 697 if (s >= beio->sg_segs[i].len) { 698 s -= beio->sg_segs[i].len; 699 continue; 700 } 701 bzero((uint8_t *)beio->sg_segs[i].addr + s, 702 beio->sg_segs[i].len - s); 703 s = 0; 704 } 705 } 706 } else { 707 struct mount *mountpoint; 708 int lock_flags; 709 710 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 711 712 if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL) 713 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 714 lock_flags = LK_SHARED; 715 else 716 lock_flags = LK_EXCLUSIVE; 717 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 718 719 /* 720 * UFS pays attention to IO_DIRECT for writes. The write 721 * is done asynchronously. (Normally the write would just 722 * get put into cache. 723 * 724 * UFS pays attention to IO_SYNC for writes. It will 725 * attempt to write the buffer out synchronously if that 726 * flag is set. 727 * 728 * ZFS does not pay attention to IO_DIRECT for writes. 729 * 730 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 731 * for writes. It will flush the transaction from the 732 * cache before returning. 733 */ 734 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); 735 VOP_UNLOCK(be_lun->vn, 0); 736 737 vn_finished_write(mountpoint); 738 SDT_PROBE0(cbb, , write, file_done); 739 } 740 741 mtx_lock(&be_lun->io_lock); 742 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 743 beio->ds_tag_type, beio->ds_trans_type, 744 /*now*/ NULL, /*then*/&beio->ds_t0); 745 mtx_unlock(&be_lun->io_lock); 746 747 /* 748 * If we got an error, set the sense data to "MEDIUM ERROR" and 749 * return the I/O to the user. 750 */ 751 if (error != 0) { 752 if (error == ENOSPC || error == EDQUOT) { 753 ctl_set_space_alloc_fail(&io->scsiio); 754 } else if (error == EROFS || error == EACCES) { 755 ctl_set_hw_write_protected(&io->scsiio); 756 } else { 757 ctl_set_medium_error(&io->scsiio, 758 beio->bio_cmd == BIO_READ); 759 } 760 ctl_complete_beio(beio); 761 return; 762 } 763 764 /* 765 * If this is a write or a verify, we're all done. 766 * If this is a read, we can now send the data to the user. 767 */ 768 if ((beio->bio_cmd == BIO_WRITE) || 769 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 770 ctl_set_success(&io->scsiio); 771 ctl_complete_beio(beio); 772 } else { 773 if ((ARGS(io)->flags & CTL_LLF_READ) && 774 beio->beio_cont == NULL) { 775 ctl_set_success(&io->scsiio); 776 ctl_serseq_done(io); 777 } 778 #ifdef CTL_TIME_IO 779 getbinuptime(&io->io_hdr.dma_start_bt); 780 #endif 781 ctl_datamove(io); 782 } 783 } 784 785 static void 786 ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun, 787 struct ctl_be_block_io *beio) 788 { 789 union ctl_io *io = beio->io; 790 struct ctl_lba_len_flags *lbalen = ARGS(io); 791 struct scsi_get_lba_status_data *data; 792 off_t roff, off; 793 int error, status; 794 795 DPRINTF("entered\n"); 796 797 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; 798 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 799 error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off, 800 0, curthread->td_ucred, curthread); 801 if (error == 0 && off > roff) 802 status = 0; /* mapped up to off */ 803 else { 804 error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off, 805 0, curthread->td_ucred, curthread); 806 if (error == 0 && off > roff) 807 status = 1; /* deallocated up to off */ 808 else { 809 status = 0; /* unknown up to the end */ 810 off = be_lun->size_bytes; 811 } 812 } 813 VOP_UNLOCK(be_lun->vn, 0); 814 815 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; 816 scsi_u64to8b(lbalen->lba, data->descr[0].addr); 817 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - 818 lbalen->lba), data->descr[0].length); 819 data->descr[0].status = status; 820 821 ctl_complete_beio(beio); 822 } 823 824 static uint64_t 825 ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) 826 { 827 struct vattr vattr; 828 struct statfs statfs; 829 uint64_t val; 830 int error; 831 832 val = UINT64_MAX; 833 if (be_lun->vn == NULL) 834 return (val); 835 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 836 if (strcmp(attrname, "blocksused") == 0) { 837 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 838 if (error == 0) 839 val = vattr.va_bytes / be_lun->cbe_lun.blocksize; 840 } 841 if (strcmp(attrname, "blocksavail") == 0 && 842 (be_lun->vn->v_iflag & VI_DOOMED) == 0) { 843 error = VFS_STATFS(be_lun->vn->v_mount, &statfs); 844 if (error == 0) 845 val = statfs.f_bavail * statfs.f_bsize / 846 be_lun->cbe_lun.blocksize; 847 } 848 VOP_UNLOCK(be_lun->vn, 0); 849 return (val); 850 } 851 852 static void 853 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 854 struct ctl_be_block_io *beio) 855 { 856 union ctl_io *io; 857 struct cdevsw *csw; 858 struct cdev *dev; 859 struct uio xuio; 860 struct iovec *xiovec; 861 int error, flags, i, ref; 862 863 DPRINTF("entered\n"); 864 865 io = beio->io; 866 flags = 0; 867 if (ARGS(io)->flags & CTL_LLF_DPO) 868 flags |= IO_DIRECT; 869 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 870 flags |= IO_SYNC; 871 872 bzero(&xuio, sizeof(xuio)); 873 if (beio->bio_cmd == BIO_READ) { 874 SDT_PROBE0(cbb, , read, file_start); 875 xuio.uio_rw = UIO_READ; 876 } else { 877 SDT_PROBE0(cbb, , write, file_start); 878 xuio.uio_rw = UIO_WRITE; 879 } 880 xuio.uio_offset = beio->io_offset; 881 xuio.uio_resid = beio->io_len; 882 xuio.uio_segflg = UIO_SYSSPACE; 883 xuio.uio_iov = beio->xiovecs; 884 xuio.uio_iovcnt = beio->num_segs; 885 xuio.uio_td = curthread; 886 887 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 888 xiovec->iov_base = beio->sg_segs[i].addr; 889 xiovec->iov_len = beio->sg_segs[i].len; 890 } 891 892 binuptime(&beio->ds_t0); 893 mtx_lock(&be_lun->io_lock); 894 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 895 mtx_unlock(&be_lun->io_lock); 896 897 csw = devvn_refthread(be_lun->vn, &dev, &ref); 898 if (csw) { 899 if (beio->bio_cmd == BIO_READ) 900 error = csw->d_read(dev, &xuio, flags); 901 else 902 error = csw->d_write(dev, &xuio, flags); 903 dev_relthread(dev, ref); 904 } else 905 error = ENXIO; 906 907 if (beio->bio_cmd == BIO_READ) 908 SDT_PROBE0(cbb, , read, file_done); 909 else 910 SDT_PROBE0(cbb, , write, file_done); 911 912 mtx_lock(&be_lun->io_lock); 913 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 914 beio->ds_tag_type, beio->ds_trans_type, 915 /*now*/ NULL, /*then*/&beio->ds_t0); 916 mtx_unlock(&be_lun->io_lock); 917 918 /* 919 * If we got an error, set the sense data to "MEDIUM ERROR" and 920 * return the I/O to the user. 921 */ 922 if (error != 0) { 923 if (error == ENOSPC || error == EDQUOT) { 924 ctl_set_space_alloc_fail(&io->scsiio); 925 } else if (error == EROFS || error == EACCES) { 926 ctl_set_hw_write_protected(&io->scsiio); 927 } else { 928 ctl_set_medium_error(&io->scsiio, 929 beio->bio_cmd == BIO_READ); 930 } 931 ctl_complete_beio(beio); 932 return; 933 } 934 935 /* 936 * If this is a write or a verify, we're all done. 937 * If this is a read, we can now send the data to the user. 938 */ 939 if ((beio->bio_cmd == BIO_WRITE) || 940 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 941 ctl_set_success(&io->scsiio); 942 ctl_complete_beio(beio); 943 } else { 944 if ((ARGS(io)->flags & CTL_LLF_READ) && 945 beio->beio_cont == NULL) { 946 ctl_set_success(&io->scsiio); 947 ctl_serseq_done(io); 948 } 949 #ifdef CTL_TIME_IO 950 getbinuptime(&io->io_hdr.dma_start_bt); 951 #endif 952 ctl_datamove(io); 953 } 954 } 955 956 static void 957 ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun, 958 struct ctl_be_block_io *beio) 959 { 960 union ctl_io *io = beio->io; 961 struct cdevsw *csw; 962 struct cdev *dev; 963 struct ctl_lba_len_flags *lbalen = ARGS(io); 964 struct scsi_get_lba_status_data *data; 965 off_t roff, off; 966 int error, ref, status; 967 968 DPRINTF("entered\n"); 969 970 csw = devvn_refthread(be_lun->vn, &dev, &ref); 971 if (csw == NULL) { 972 status = 0; /* unknown up to the end */ 973 off = be_lun->size_bytes; 974 goto done; 975 } 976 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize; 977 error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD, 978 curthread); 979 if (error == 0 && off > roff) 980 status = 0; /* mapped up to off */ 981 else { 982 error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD, 983 curthread); 984 if (error == 0 && off > roff) 985 status = 1; /* deallocated up to off */ 986 else { 987 status = 0; /* unknown up to the end */ 988 off = be_lun->size_bytes; 989 } 990 } 991 dev_relthread(dev, ref); 992 993 done: 994 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr; 995 scsi_u64to8b(lbalen->lba, data->descr[0].addr); 996 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize - 997 lbalen->lba), data->descr[0].length); 998 data->descr[0].status = status; 999 1000 ctl_complete_beio(beio); 1001 } 1002 1003 static void 1004 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 1005 struct ctl_be_block_io *beio) 1006 { 1007 struct bio *bio; 1008 struct cdevsw *csw; 1009 struct cdev *dev; 1010 int ref; 1011 1012 DPRINTF("entered\n"); 1013 1014 /* This can't fail, it's a blocking allocation. */ 1015 bio = g_alloc_bio(); 1016 1017 bio->bio_cmd = BIO_FLUSH; 1018 bio->bio_offset = 0; 1019 bio->bio_data = 0; 1020 bio->bio_done = ctl_be_block_biodone; 1021 bio->bio_caller1 = beio; 1022 bio->bio_pblkno = 0; 1023 1024 /* 1025 * We don't need to acquire the LUN lock here, because we are only 1026 * sending one bio, and so there is no other context to synchronize 1027 * with. 1028 */ 1029 beio->num_bios_sent = 1; 1030 beio->send_complete = 1; 1031 1032 binuptime(&beio->ds_t0); 1033 mtx_lock(&be_lun->io_lock); 1034 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1035 mtx_unlock(&be_lun->io_lock); 1036 1037 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1038 if (csw) { 1039 bio->bio_dev = dev; 1040 csw->d_strategy(bio); 1041 dev_relthread(dev, ref); 1042 } else { 1043 bio->bio_error = ENXIO; 1044 ctl_be_block_biodone(bio); 1045 } 1046 } 1047 1048 static void 1049 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 1050 struct ctl_be_block_io *beio, 1051 uint64_t off, uint64_t len, int last) 1052 { 1053 struct bio *bio; 1054 uint64_t maxlen; 1055 struct cdevsw *csw; 1056 struct cdev *dev; 1057 int ref; 1058 1059 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1060 maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize); 1061 while (len > 0) { 1062 bio = g_alloc_bio(); 1063 bio->bio_cmd = BIO_DELETE; 1064 bio->bio_dev = dev; 1065 bio->bio_offset = off; 1066 bio->bio_length = MIN(len, maxlen); 1067 bio->bio_data = 0; 1068 bio->bio_done = ctl_be_block_biodone; 1069 bio->bio_caller1 = beio; 1070 bio->bio_pblkno = off / be_lun->cbe_lun.blocksize; 1071 1072 off += bio->bio_length; 1073 len -= bio->bio_length; 1074 1075 mtx_lock(&be_lun->io_lock); 1076 beio->num_bios_sent++; 1077 if (last && len == 0) 1078 beio->send_complete = 1; 1079 mtx_unlock(&be_lun->io_lock); 1080 1081 if (csw) { 1082 csw->d_strategy(bio); 1083 } else { 1084 bio->bio_error = ENXIO; 1085 ctl_be_block_biodone(bio); 1086 } 1087 } 1088 if (csw) 1089 dev_relthread(dev, ref); 1090 } 1091 1092 static void 1093 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 1094 struct ctl_be_block_io *beio) 1095 { 1096 union ctl_io *io; 1097 struct ctl_ptr_len_flags *ptrlen; 1098 struct scsi_unmap_desc *buf, *end; 1099 uint64_t len; 1100 1101 io = beio->io; 1102 1103 DPRINTF("entered\n"); 1104 1105 binuptime(&beio->ds_t0); 1106 mtx_lock(&be_lun->io_lock); 1107 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1108 mtx_unlock(&be_lun->io_lock); 1109 1110 if (beio->io_offset == -1) { 1111 beio->io_len = 0; 1112 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1113 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 1114 end = buf + ptrlen->len / sizeof(*buf); 1115 for (; buf < end; buf++) { 1116 len = (uint64_t)scsi_4btoul(buf->length) * 1117 be_lun->cbe_lun.blocksize; 1118 beio->io_len += len; 1119 ctl_be_block_unmap_dev_range(be_lun, beio, 1120 scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize, 1121 len, (end - buf < 2) ? TRUE : FALSE); 1122 } 1123 } else 1124 ctl_be_block_unmap_dev_range(be_lun, beio, 1125 beio->io_offset, beio->io_len, TRUE); 1126 } 1127 1128 static void 1129 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 1130 struct ctl_be_block_io *beio) 1131 { 1132 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 1133 struct bio *bio; 1134 struct cdevsw *csw; 1135 struct cdev *dev; 1136 off_t cur_offset; 1137 int i, max_iosize, ref; 1138 1139 DPRINTF("entered\n"); 1140 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1141 1142 /* 1143 * We have to limit our I/O size to the maximum supported by the 1144 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 1145 * set it properly, use DFLTPHYS. 1146 */ 1147 if (csw) { 1148 max_iosize = dev->si_iosize_max; 1149 if (max_iosize < PAGE_SIZE) 1150 max_iosize = DFLTPHYS; 1151 } else 1152 max_iosize = DFLTPHYS; 1153 1154 cur_offset = beio->io_offset; 1155 for (i = 0; i < beio->num_segs; i++) { 1156 size_t cur_size; 1157 uint8_t *cur_ptr; 1158 1159 cur_size = beio->sg_segs[i].len; 1160 cur_ptr = beio->sg_segs[i].addr; 1161 1162 while (cur_size > 0) { 1163 /* This can't fail, it's a blocking allocation. */ 1164 bio = g_alloc_bio(); 1165 1166 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 1167 1168 bio->bio_cmd = beio->bio_cmd; 1169 bio->bio_dev = dev; 1170 bio->bio_caller1 = beio; 1171 bio->bio_length = min(cur_size, max_iosize); 1172 bio->bio_offset = cur_offset; 1173 bio->bio_data = cur_ptr; 1174 bio->bio_done = ctl_be_block_biodone; 1175 bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize; 1176 1177 cur_offset += bio->bio_length; 1178 cur_ptr += bio->bio_length; 1179 cur_size -= bio->bio_length; 1180 1181 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 1182 beio->num_bios_sent++; 1183 } 1184 } 1185 binuptime(&beio->ds_t0); 1186 mtx_lock(&be_lun->io_lock); 1187 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1188 beio->send_complete = 1; 1189 mtx_unlock(&be_lun->io_lock); 1190 1191 /* 1192 * Fire off all allocated requests! 1193 */ 1194 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1195 TAILQ_REMOVE(&queue, bio, bio_queue); 1196 if (csw) 1197 csw->d_strategy(bio); 1198 else { 1199 bio->bio_error = ENXIO; 1200 ctl_be_block_biodone(bio); 1201 } 1202 } 1203 if (csw) 1204 dev_relthread(dev, ref); 1205 } 1206 1207 static uint64_t 1208 ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname) 1209 { 1210 struct diocgattr_arg arg; 1211 struct cdevsw *csw; 1212 struct cdev *dev; 1213 int error, ref; 1214 1215 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1216 if (csw == NULL) 1217 return (UINT64_MAX); 1218 strlcpy(arg.name, attrname, sizeof(arg.name)); 1219 arg.len = sizeof(arg.value.off); 1220 if (csw->d_ioctl) { 1221 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, 1222 curthread); 1223 } else 1224 error = ENODEV; 1225 dev_relthread(dev, ref); 1226 if (error != 0) 1227 return (UINT64_MAX); 1228 return (arg.value.off); 1229 } 1230 1231 static void 1232 ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun, 1233 union ctl_io *io) 1234 { 1235 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1236 struct ctl_be_block_io *beio; 1237 struct ctl_lba_len_flags *lbalen; 1238 1239 DPRINTF("entered\n"); 1240 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1241 lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1242 1243 beio->io_len = lbalen->len * cbe_lun->blocksize; 1244 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1245 beio->io_arg = (lbalen->flags & SSC_IMMED) != 0; 1246 beio->bio_cmd = BIO_FLUSH; 1247 beio->ds_trans_type = DEVSTAT_NO_DATA; 1248 DPRINTF("SYNC\n"); 1249 be_lun->lun_flush(be_lun, beio); 1250 } 1251 1252 static void 1253 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1254 { 1255 union ctl_io *io; 1256 1257 io = beio->io; 1258 ctl_free_beio(beio); 1259 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1260 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1261 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1262 ctl_config_write_done(io); 1263 return; 1264 } 1265 1266 ctl_be_block_config_write(io); 1267 } 1268 1269 static void 1270 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1271 union ctl_io *io) 1272 { 1273 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1274 struct ctl_be_block_io *beio; 1275 struct ctl_lba_len_flags *lbalen; 1276 uint64_t len_left, lba; 1277 uint32_t pb, pbo, adj; 1278 int i, seglen; 1279 uint8_t *buf, *end; 1280 1281 DPRINTF("entered\n"); 1282 1283 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1284 lbalen = ARGS(beio->io); 1285 1286 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || 1287 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1288 ctl_free_beio(beio); 1289 ctl_set_invalid_field(&io->scsiio, 1290 /*sks_valid*/ 1, 1291 /*command*/ 1, 1292 /*field*/ 1, 1293 /*bit_valid*/ 0, 1294 /*bit*/ 0); 1295 ctl_config_write_done(io); 1296 return; 1297 } 1298 1299 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1300 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1301 beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize; 1302 beio->bio_cmd = BIO_DELETE; 1303 beio->ds_trans_type = DEVSTAT_FREE; 1304 1305 be_lun->unmap(be_lun, beio); 1306 return; 1307 } 1308 1309 beio->bio_cmd = BIO_WRITE; 1310 beio->ds_trans_type = DEVSTAT_WRITE; 1311 1312 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1313 (uintmax_t)lbalen->lba, lbalen->len); 1314 1315 pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp; 1316 if (be_lun->cbe_lun.pblockoff > 0) 1317 pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff; 1318 else 1319 pbo = 0; 1320 len_left = (uint64_t)lbalen->len * cbe_lun->blocksize; 1321 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1322 1323 /* 1324 * Setup the S/G entry for this chunk. 1325 */ 1326 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1327 if (pb > cbe_lun->blocksize) { 1328 adj = ((lbalen->lba + lba) * cbe_lun->blocksize + 1329 seglen - pbo) % pb; 1330 if (seglen > adj) 1331 seglen -= adj; 1332 else 1333 seglen -= seglen % cbe_lun->blocksize; 1334 } else 1335 seglen -= seglen % cbe_lun->blocksize; 1336 beio->sg_segs[i].len = seglen; 1337 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1338 1339 DPRINTF("segment %d addr %p len %zd\n", i, 1340 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1341 1342 beio->num_segs++; 1343 len_left -= seglen; 1344 1345 buf = beio->sg_segs[i].addr; 1346 end = buf + seglen; 1347 for (; buf < end; buf += cbe_lun->blocksize) { 1348 if (lbalen->flags & SWS_NDOB) { 1349 memset(buf, 0, cbe_lun->blocksize); 1350 } else { 1351 memcpy(buf, io->scsiio.kern_data_ptr, 1352 cbe_lun->blocksize); 1353 } 1354 if (lbalen->flags & SWS_LBDATA) 1355 scsi_ulto4b(lbalen->lba + lba, buf); 1356 lba++; 1357 } 1358 } 1359 1360 beio->io_offset = lbalen->lba * cbe_lun->blocksize; 1361 beio->io_len = lba * cbe_lun->blocksize; 1362 1363 /* We can not do all in one run. Correct and schedule rerun. */ 1364 if (len_left > 0) { 1365 lbalen->lba += lba; 1366 lbalen->len -= lba; 1367 beio->beio_cont = ctl_be_block_cw_done_ws; 1368 } 1369 1370 be_lun->dispatch(be_lun, beio); 1371 } 1372 1373 static void 1374 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1375 union ctl_io *io) 1376 { 1377 struct ctl_be_block_io *beio; 1378 struct ctl_ptr_len_flags *ptrlen; 1379 1380 DPRINTF("entered\n"); 1381 1382 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1383 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1384 1385 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1386 ctl_free_beio(beio); 1387 ctl_set_invalid_field(&io->scsiio, 1388 /*sks_valid*/ 0, 1389 /*command*/ 1, 1390 /*field*/ 0, 1391 /*bit_valid*/ 0, 1392 /*bit*/ 0); 1393 ctl_config_write_done(io); 1394 return; 1395 } 1396 1397 beio->io_len = 0; 1398 beio->io_offset = -1; 1399 beio->bio_cmd = BIO_DELETE; 1400 beio->ds_trans_type = DEVSTAT_FREE; 1401 DPRINTF("UNMAP\n"); 1402 be_lun->unmap(be_lun, beio); 1403 } 1404 1405 static void 1406 ctl_be_block_cr_done(struct ctl_be_block_io *beio) 1407 { 1408 union ctl_io *io; 1409 1410 io = beio->io; 1411 ctl_free_beio(beio); 1412 ctl_config_read_done(io); 1413 } 1414 1415 static void 1416 ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun, 1417 union ctl_io *io) 1418 { 1419 struct ctl_be_block_io *beio; 1420 struct ctl_be_block_softc *softc; 1421 1422 DPRINTF("entered\n"); 1423 1424 softc = be_lun->softc; 1425 beio = ctl_alloc_beio(softc); 1426 beio->io = io; 1427 beio->lun = be_lun; 1428 beio->beio_cont = ctl_be_block_cr_done; 1429 PRIV(io)->ptr = (void *)beio; 1430 1431 switch (io->scsiio.cdb[0]) { 1432 case SERVICE_ACTION_IN: /* GET LBA STATUS */ 1433 beio->bio_cmd = -1; 1434 beio->ds_trans_type = DEVSTAT_NO_DATA; 1435 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1436 beio->io_len = 0; 1437 if (be_lun->get_lba_status) 1438 be_lun->get_lba_status(be_lun, beio); 1439 else 1440 ctl_be_block_cr_done(beio); 1441 break; 1442 default: 1443 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1444 break; 1445 } 1446 } 1447 1448 static void 1449 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1450 { 1451 union ctl_io *io; 1452 1453 io = beio->io; 1454 ctl_free_beio(beio); 1455 ctl_config_write_done(io); 1456 } 1457 1458 static void 1459 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1460 union ctl_io *io) 1461 { 1462 struct ctl_be_block_io *beio; 1463 struct ctl_be_block_softc *softc; 1464 1465 DPRINTF("entered\n"); 1466 1467 softc = be_lun->softc; 1468 beio = ctl_alloc_beio(softc); 1469 beio->io = io; 1470 beio->lun = be_lun; 1471 beio->beio_cont = ctl_be_block_cw_done; 1472 switch (io->scsiio.tag_type) { 1473 case CTL_TAG_ORDERED: 1474 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1475 break; 1476 case CTL_TAG_HEAD_OF_QUEUE: 1477 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1478 break; 1479 case CTL_TAG_UNTAGGED: 1480 case CTL_TAG_SIMPLE: 1481 case CTL_TAG_ACA: 1482 default: 1483 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1484 break; 1485 } 1486 PRIV(io)->ptr = (void *)beio; 1487 1488 switch (io->scsiio.cdb[0]) { 1489 case SYNCHRONIZE_CACHE: 1490 case SYNCHRONIZE_CACHE_16: 1491 ctl_be_block_cw_dispatch_sync(be_lun, io); 1492 break; 1493 case WRITE_SAME_10: 1494 case WRITE_SAME_16: 1495 ctl_be_block_cw_dispatch_ws(be_lun, io); 1496 break; 1497 case UNMAP: 1498 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1499 break; 1500 default: 1501 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1502 break; 1503 } 1504 } 1505 1506 SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t"); 1507 SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t"); 1508 SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t"); 1509 SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t"); 1510 1511 static void 1512 ctl_be_block_next(struct ctl_be_block_io *beio) 1513 { 1514 struct ctl_be_block_lun *be_lun; 1515 union ctl_io *io; 1516 1517 io = beio->io; 1518 be_lun = beio->lun; 1519 ctl_free_beio(beio); 1520 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1521 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1522 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1523 ctl_data_submit_done(io); 1524 return; 1525 } 1526 1527 io->io_hdr.status &= ~CTL_STATUS_MASK; 1528 io->io_hdr.status |= CTL_STATUS_NONE; 1529 1530 mtx_lock(&be_lun->queue_lock); 1531 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1532 mtx_unlock(&be_lun->queue_lock); 1533 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1534 } 1535 1536 static void 1537 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1538 union ctl_io *io) 1539 { 1540 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1541 struct ctl_be_block_io *beio; 1542 struct ctl_be_block_softc *softc; 1543 struct ctl_lba_len_flags *lbalen; 1544 struct ctl_ptr_len_flags *bptrlen; 1545 uint64_t len_left, lbas; 1546 int i; 1547 1548 softc = be_lun->softc; 1549 1550 DPRINTF("entered\n"); 1551 1552 lbalen = ARGS(io); 1553 if (lbalen->flags & CTL_LLF_WRITE) { 1554 SDT_PROBE0(cbb, , write, start); 1555 } else { 1556 SDT_PROBE0(cbb, , read, start); 1557 } 1558 1559 beio = ctl_alloc_beio(softc); 1560 beio->io = io; 1561 beio->lun = be_lun; 1562 bptrlen = PRIV(io); 1563 bptrlen->ptr = (void *)beio; 1564 1565 switch (io->scsiio.tag_type) { 1566 case CTL_TAG_ORDERED: 1567 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1568 break; 1569 case CTL_TAG_HEAD_OF_QUEUE: 1570 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1571 break; 1572 case CTL_TAG_UNTAGGED: 1573 case CTL_TAG_SIMPLE: 1574 case CTL_TAG_ACA: 1575 default: 1576 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1577 break; 1578 } 1579 1580 if (lbalen->flags & CTL_LLF_WRITE) { 1581 beio->bio_cmd = BIO_WRITE; 1582 beio->ds_trans_type = DEVSTAT_WRITE; 1583 } else { 1584 beio->bio_cmd = BIO_READ; 1585 beio->ds_trans_type = DEVSTAT_READ; 1586 } 1587 1588 DPRINTF("%s at LBA %jx len %u @%ju\n", 1589 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1590 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1591 if (lbalen->flags & CTL_LLF_COMPARE) 1592 lbas = CTLBLK_HALF_IO_SIZE; 1593 else 1594 lbas = CTLBLK_MAX_IO_SIZE; 1595 lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize); 1596 beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize; 1597 beio->io_len = lbas * cbe_lun->blocksize; 1598 bptrlen->len += lbas; 1599 1600 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1601 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1602 i, CTLBLK_MAX_SEGS)); 1603 1604 /* 1605 * Setup the S/G entry for this chunk. 1606 */ 1607 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1608 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1609 1610 DPRINTF("segment %d addr %p len %zd\n", i, 1611 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1612 1613 /* Set up second segment for compare operation. */ 1614 if (lbalen->flags & CTL_LLF_COMPARE) { 1615 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1616 beio->sg_segs[i].len; 1617 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1618 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1619 } 1620 1621 beio->num_segs++; 1622 len_left -= beio->sg_segs[i].len; 1623 } 1624 if (bptrlen->len < lbalen->len) 1625 beio->beio_cont = ctl_be_block_next; 1626 io->scsiio.be_move_done = ctl_be_block_move_done; 1627 /* For compare we have separate S/G lists for read and datamove. */ 1628 if (lbalen->flags & CTL_LLF_COMPARE) 1629 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1630 else 1631 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1632 io->scsiio.kern_data_len = beio->io_len; 1633 io->scsiio.kern_sg_entries = beio->num_segs; 1634 io->io_hdr.flags |= CTL_FLAG_ALLOCATED; 1635 1636 /* 1637 * For the read case, we need to read the data into our buffers and 1638 * then we can send it back to the user. For the write case, we 1639 * need to get the data from the user first. 1640 */ 1641 if (beio->bio_cmd == BIO_READ) { 1642 SDT_PROBE0(cbb, , read, alloc_done); 1643 be_lun->dispatch(be_lun, beio); 1644 } else { 1645 SDT_PROBE0(cbb, , write, alloc_done); 1646 #ifdef CTL_TIME_IO 1647 getbinuptime(&io->io_hdr.dma_start_bt); 1648 #endif 1649 ctl_datamove(io); 1650 } 1651 } 1652 1653 static void 1654 ctl_be_block_worker(void *context, int pending) 1655 { 1656 struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context; 1657 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1658 union ctl_io *io; 1659 struct ctl_be_block_io *beio; 1660 1661 DPRINTF("entered\n"); 1662 /* 1663 * Fetch and process I/Os from all queues. If we detect LUN 1664 * CTL_LUN_FLAG_NO_MEDIA status here -- it is result of a race, 1665 * so make response maximally opaque to not confuse initiator. 1666 */ 1667 for (;;) { 1668 mtx_lock(&be_lun->queue_lock); 1669 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1670 if (io != NULL) { 1671 DPRINTF("datamove queue\n"); 1672 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1673 ctl_io_hdr, links); 1674 mtx_unlock(&be_lun->queue_lock); 1675 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1676 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1677 ctl_set_busy(&io->scsiio); 1678 ctl_complete_beio(beio); 1679 return; 1680 } 1681 be_lun->dispatch(be_lun, beio); 1682 continue; 1683 } 1684 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1685 if (io != NULL) { 1686 DPRINTF("config write queue\n"); 1687 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1688 ctl_io_hdr, links); 1689 mtx_unlock(&be_lun->queue_lock); 1690 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1691 ctl_set_busy(&io->scsiio); 1692 ctl_config_write_done(io); 1693 return; 1694 } 1695 ctl_be_block_cw_dispatch(be_lun, io); 1696 continue; 1697 } 1698 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue); 1699 if (io != NULL) { 1700 DPRINTF("config read queue\n"); 1701 STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr, 1702 ctl_io_hdr, links); 1703 mtx_unlock(&be_lun->queue_lock); 1704 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1705 ctl_set_busy(&io->scsiio); 1706 ctl_config_read_done(io); 1707 return; 1708 } 1709 ctl_be_block_cr_dispatch(be_lun, io); 1710 continue; 1711 } 1712 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1713 if (io != NULL) { 1714 DPRINTF("input queue\n"); 1715 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1716 ctl_io_hdr, links); 1717 mtx_unlock(&be_lun->queue_lock); 1718 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) { 1719 ctl_set_busy(&io->scsiio); 1720 ctl_data_submit_done(io); 1721 return; 1722 } 1723 ctl_be_block_dispatch(be_lun, io); 1724 continue; 1725 } 1726 1727 /* 1728 * If we get here, there is no work left in the queues, so 1729 * just break out and let the task queue go to sleep. 1730 */ 1731 mtx_unlock(&be_lun->queue_lock); 1732 break; 1733 } 1734 } 1735 1736 /* 1737 * Entry point from CTL to the backend for I/O. We queue everything to a 1738 * work thread, so this just puts the I/O on a queue and wakes up the 1739 * thread. 1740 */ 1741 static int 1742 ctl_be_block_submit(union ctl_io *io) 1743 { 1744 struct ctl_be_block_lun *be_lun; 1745 struct ctl_be_lun *cbe_lun; 1746 1747 DPRINTF("entered\n"); 1748 1749 cbe_lun = CTL_BACKEND_LUN(io); 1750 be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; 1751 1752 /* 1753 * Make sure we only get SCSI I/O. 1754 */ 1755 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1756 "%#x) encountered", io->io_hdr.io_type)); 1757 1758 PRIV(io)->len = 0; 1759 1760 mtx_lock(&be_lun->queue_lock); 1761 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1762 mtx_unlock(&be_lun->queue_lock); 1763 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1764 1765 return (CTL_RETVAL_COMPLETE); 1766 } 1767 1768 static int 1769 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1770 int flag, struct thread *td) 1771 { 1772 struct ctl_be_block_softc *softc; 1773 int error; 1774 1775 softc = &backend_block_softc; 1776 1777 error = 0; 1778 1779 switch (cmd) { 1780 case CTL_LUN_REQ: { 1781 struct ctl_lun_req *lun_req; 1782 1783 lun_req = (struct ctl_lun_req *)addr; 1784 1785 switch (lun_req->reqtype) { 1786 case CTL_LUNREQ_CREATE: 1787 error = ctl_be_block_create(softc, lun_req); 1788 break; 1789 case CTL_LUNREQ_RM: 1790 error = ctl_be_block_rm(softc, lun_req); 1791 break; 1792 case CTL_LUNREQ_MODIFY: 1793 error = ctl_be_block_modify(softc, lun_req); 1794 break; 1795 default: 1796 lun_req->status = CTL_LUN_ERROR; 1797 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1798 "invalid LUN request type %d", 1799 lun_req->reqtype); 1800 break; 1801 } 1802 break; 1803 } 1804 default: 1805 error = ENOTTY; 1806 break; 1807 } 1808 1809 return (error); 1810 } 1811 1812 static int 1813 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1814 { 1815 struct ctl_be_lun *cbe_lun; 1816 struct ctl_be_block_filedata *file_data; 1817 struct ctl_lun_create_params *params; 1818 char *value; 1819 struct vattr vattr; 1820 off_t ps, pss, po, pos, us, uss, uo, uos; 1821 int error; 1822 1823 cbe_lun = &be_lun->cbe_lun; 1824 file_data = &be_lun->backend.file; 1825 params = &be_lun->params; 1826 1827 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1828 be_lun->dispatch = ctl_be_block_dispatch_file; 1829 be_lun->lun_flush = ctl_be_block_flush_file; 1830 be_lun->get_lba_status = ctl_be_block_gls_file; 1831 be_lun->getattr = ctl_be_block_getattr_file; 1832 be_lun->unmap = NULL; 1833 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; 1834 1835 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1836 if (error != 0) { 1837 snprintf(req->error_str, sizeof(req->error_str), 1838 "error calling VOP_GETATTR() for file %s", 1839 be_lun->dev_path); 1840 return (error); 1841 } 1842 1843 file_data->cred = crhold(curthread->td_ucred); 1844 if (params->lun_size_bytes != 0) 1845 be_lun->size_bytes = params->lun_size_bytes; 1846 else 1847 be_lun->size_bytes = vattr.va_size; 1848 1849 /* 1850 * For files we can use any logical block size. Prefer 512 bytes 1851 * for compatibility reasons. If file's vattr.va_blocksize 1852 * (preferred I/O block size) is bigger and multiple to chosen 1853 * logical block size -- report it as physical block size. 1854 */ 1855 if (params->blocksize_bytes != 0) 1856 cbe_lun->blocksize = params->blocksize_bytes; 1857 else if (cbe_lun->lun_type == T_CDROM) 1858 cbe_lun->blocksize = 2048; 1859 else 1860 cbe_lun->blocksize = 512; 1861 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 1862 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 1863 0 : (be_lun->size_blocks - 1); 1864 1865 us = ps = vattr.va_blocksize; 1866 uo = po = 0; 1867 1868 value = ctl_get_opt(&cbe_lun->options, "pblocksize"); 1869 if (value != NULL) 1870 ctl_expand_number(value, &ps); 1871 value = ctl_get_opt(&cbe_lun->options, "pblockoffset"); 1872 if (value != NULL) 1873 ctl_expand_number(value, &po); 1874 pss = ps / cbe_lun->blocksize; 1875 pos = po / cbe_lun->blocksize; 1876 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && 1877 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { 1878 cbe_lun->pblockexp = fls(pss) - 1; 1879 cbe_lun->pblockoff = (pss - pos) % pss; 1880 } 1881 1882 value = ctl_get_opt(&cbe_lun->options, "ublocksize"); 1883 if (value != NULL) 1884 ctl_expand_number(value, &us); 1885 value = ctl_get_opt(&cbe_lun->options, "ublockoffset"); 1886 if (value != NULL) 1887 ctl_expand_number(value, &uo); 1888 uss = us / cbe_lun->blocksize; 1889 uos = uo / cbe_lun->blocksize; 1890 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && 1891 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { 1892 cbe_lun->ublockexp = fls(uss) - 1; 1893 cbe_lun->ublockoff = (uss - uos) % uss; 1894 } 1895 1896 /* 1897 * Sanity check. The media size has to be at least one 1898 * sector long. 1899 */ 1900 if (be_lun->size_bytes < cbe_lun->blocksize) { 1901 error = EINVAL; 1902 snprintf(req->error_str, sizeof(req->error_str), 1903 "file %s size %ju < block size %u", be_lun->dev_path, 1904 (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize); 1905 } 1906 1907 cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize; 1908 return (error); 1909 } 1910 1911 static int 1912 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1913 { 1914 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 1915 struct ctl_lun_create_params *params; 1916 struct cdevsw *csw; 1917 struct cdev *dev; 1918 char *value; 1919 int error, atomic, maxio, ref, unmap, tmp; 1920 off_t ps, pss, po, pos, us, uss, uo, uos, otmp; 1921 1922 params = &be_lun->params; 1923 1924 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1925 csw = devvn_refthread(be_lun->vn, &dev, &ref); 1926 if (csw == NULL) 1927 return (ENXIO); 1928 if (strcmp(csw->d_name, "zvol") == 0) { 1929 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1930 be_lun->get_lba_status = ctl_be_block_gls_zvol; 1931 atomic = maxio = CTLBLK_MAX_IO_SIZE; 1932 } else { 1933 be_lun->dispatch = ctl_be_block_dispatch_dev; 1934 be_lun->get_lba_status = NULL; 1935 atomic = 0; 1936 maxio = dev->si_iosize_max; 1937 if (maxio <= 0) 1938 maxio = DFLTPHYS; 1939 if (maxio > CTLBLK_MAX_IO_SIZE) 1940 maxio = CTLBLK_MAX_IO_SIZE; 1941 } 1942 be_lun->lun_flush = ctl_be_block_flush_dev; 1943 be_lun->getattr = ctl_be_block_getattr_dev; 1944 be_lun->unmap = ctl_be_block_unmap_dev; 1945 1946 if (!csw->d_ioctl) { 1947 dev_relthread(dev, ref); 1948 snprintf(req->error_str, sizeof(req->error_str), 1949 "no d_ioctl for device %s!", be_lun->dev_path); 1950 return (ENODEV); 1951 } 1952 1953 error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD, 1954 curthread); 1955 if (error) { 1956 dev_relthread(dev, ref); 1957 snprintf(req->error_str, sizeof(req->error_str), 1958 "error %d returned for DIOCGSECTORSIZE ioctl " 1959 "on %s!", error, be_lun->dev_path); 1960 return (error); 1961 } 1962 1963 /* 1964 * If the user has asked for a blocksize that is greater than the 1965 * backing device's blocksize, we can do it only if the blocksize 1966 * the user is asking for is an even multiple of the underlying 1967 * device's blocksize. 1968 */ 1969 if ((params->blocksize_bytes != 0) && 1970 (params->blocksize_bytes >= tmp)) { 1971 if (params->blocksize_bytes % tmp == 0) { 1972 cbe_lun->blocksize = params->blocksize_bytes; 1973 } else { 1974 dev_relthread(dev, ref); 1975 snprintf(req->error_str, sizeof(req->error_str), 1976 "requested blocksize %u is not an even " 1977 "multiple of backing device blocksize %u", 1978 params->blocksize_bytes, tmp); 1979 return (EINVAL); 1980 } 1981 } else if (params->blocksize_bytes != 0) { 1982 dev_relthread(dev, ref); 1983 snprintf(req->error_str, sizeof(req->error_str), 1984 "requested blocksize %u < backing device " 1985 "blocksize %u", params->blocksize_bytes, tmp); 1986 return (EINVAL); 1987 } else if (cbe_lun->lun_type == T_CDROM) 1988 cbe_lun->blocksize = MAX(tmp, 2048); 1989 else 1990 cbe_lun->blocksize = tmp; 1991 1992 error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD, 1993 curthread); 1994 if (error) { 1995 dev_relthread(dev, ref); 1996 snprintf(req->error_str, sizeof(req->error_str), 1997 "error %d returned for DIOCGMEDIASIZE " 1998 " ioctl on %s!", error, 1999 be_lun->dev_path); 2000 return (error); 2001 } 2002 2003 if (params->lun_size_bytes != 0) { 2004 if (params->lun_size_bytes > otmp) { 2005 dev_relthread(dev, ref); 2006 snprintf(req->error_str, sizeof(req->error_str), 2007 "requested LUN size %ju > backing device " 2008 "size %ju", 2009 (uintmax_t)params->lun_size_bytes, 2010 (uintmax_t)otmp); 2011 return (EINVAL); 2012 } 2013 2014 be_lun->size_bytes = params->lun_size_bytes; 2015 } else 2016 be_lun->size_bytes = otmp; 2017 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 2018 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 2019 0 : (be_lun->size_blocks - 1); 2020 2021 error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD, 2022 curthread); 2023 if (error) 2024 ps = po = 0; 2025 else { 2026 error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po, 2027 FREAD, curthread); 2028 if (error) 2029 po = 0; 2030 } 2031 us = ps; 2032 uo = po; 2033 2034 value = ctl_get_opt(&cbe_lun->options, "pblocksize"); 2035 if (value != NULL) 2036 ctl_expand_number(value, &ps); 2037 value = ctl_get_opt(&cbe_lun->options, "pblockoffset"); 2038 if (value != NULL) 2039 ctl_expand_number(value, &po); 2040 pss = ps / cbe_lun->blocksize; 2041 pos = po / cbe_lun->blocksize; 2042 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) && 2043 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) { 2044 cbe_lun->pblockexp = fls(pss) - 1; 2045 cbe_lun->pblockoff = (pss - pos) % pss; 2046 } 2047 2048 value = ctl_get_opt(&cbe_lun->options, "ublocksize"); 2049 if (value != NULL) 2050 ctl_expand_number(value, &us); 2051 value = ctl_get_opt(&cbe_lun->options, "ublockoffset"); 2052 if (value != NULL) 2053 ctl_expand_number(value, &uo); 2054 uss = us / cbe_lun->blocksize; 2055 uos = uo / cbe_lun->blocksize; 2056 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) && 2057 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) { 2058 cbe_lun->ublockexp = fls(uss) - 1; 2059 cbe_lun->ublockoff = (uss - uos) % uss; 2060 } 2061 2062 cbe_lun->atomicblock = atomic / cbe_lun->blocksize; 2063 cbe_lun->opttxferlen = maxio / cbe_lun->blocksize; 2064 2065 if (be_lun->dispatch == ctl_be_block_dispatch_zvol) { 2066 unmap = 1; 2067 } else { 2068 struct diocgattr_arg arg; 2069 2070 strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name)); 2071 arg.len = sizeof(arg.value.i); 2072 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD, 2073 curthread); 2074 unmap = (error == 0) ? arg.value.i : 0; 2075 } 2076 value = ctl_get_opt(&cbe_lun->options, "unmap"); 2077 if (value != NULL) 2078 unmap = (strcmp(value, "on") == 0); 2079 if (unmap) 2080 cbe_lun->flags |= CTL_LUN_FLAG_UNMAP; 2081 else 2082 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP; 2083 2084 dev_relthread(dev, ref); 2085 return (0); 2086 } 2087 2088 static int 2089 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 2090 { 2091 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 2092 int flags; 2093 2094 if (be_lun->vn) { 2095 flags = FREAD; 2096 if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0) 2097 flags |= FWRITE; 2098 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 2099 be_lun->vn = NULL; 2100 2101 switch (be_lun->dev_type) { 2102 case CTL_BE_BLOCK_DEV: 2103 break; 2104 case CTL_BE_BLOCK_FILE: 2105 if (be_lun->backend.file.cred != NULL) { 2106 crfree(be_lun->backend.file.cred); 2107 be_lun->backend.file.cred = NULL; 2108 } 2109 break; 2110 case CTL_BE_BLOCK_NONE: 2111 break; 2112 default: 2113 panic("Unexpected backend type %d", be_lun->dev_type); 2114 break; 2115 } 2116 be_lun->dev_type = CTL_BE_BLOCK_NONE; 2117 } 2118 return (0); 2119 } 2120 2121 static int 2122 ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 2123 { 2124 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun; 2125 struct nameidata nd; 2126 char *value; 2127 int error, flags; 2128 2129 error = 0; 2130 if (rootvnode == NULL) { 2131 snprintf(req->error_str, sizeof(req->error_str), 2132 "Root filesystem is not mounted"); 2133 return (1); 2134 } 2135 pwd_ensure_dirs(); 2136 2137 value = ctl_get_opt(&cbe_lun->options, "file"); 2138 if (value == NULL) { 2139 snprintf(req->error_str, sizeof(req->error_str), 2140 "no file argument specified"); 2141 return (1); 2142 } 2143 free(be_lun->dev_path, M_CTLBLK); 2144 be_lun->dev_path = strdup(value, M_CTLBLK); 2145 2146 flags = FREAD; 2147 value = ctl_get_opt(&cbe_lun->options, "readonly"); 2148 if (value != NULL) { 2149 if (strcmp(value, "on") != 0) 2150 flags |= FWRITE; 2151 } else if (cbe_lun->lun_type == T_DIRECT) 2152 flags |= FWRITE; 2153 2154 again: 2155 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 2156 error = vn_open(&nd, &flags, 0, NULL); 2157 if ((error == EROFS || error == EACCES) && (flags & FWRITE)) { 2158 flags &= ~FWRITE; 2159 goto again; 2160 } 2161 if (error) { 2162 /* 2163 * This is the only reasonable guess we can make as far as 2164 * path if the user doesn't give us a fully qualified path. 2165 * If they want to specify a file, they need to specify the 2166 * full path. 2167 */ 2168 if (be_lun->dev_path[0] != '/') { 2169 char *dev_name; 2170 2171 asprintf(&dev_name, M_CTLBLK, "/dev/%s", 2172 be_lun->dev_path); 2173 free(be_lun->dev_path, M_CTLBLK); 2174 be_lun->dev_path = dev_name; 2175 goto again; 2176 } 2177 snprintf(req->error_str, sizeof(req->error_str), 2178 "error opening %s: %d", be_lun->dev_path, error); 2179 return (error); 2180 } 2181 if (flags & FWRITE) 2182 cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY; 2183 else 2184 cbe_lun->flags |= CTL_LUN_FLAG_READONLY; 2185 2186 NDFREE(&nd, NDF_ONLY_PNBUF); 2187 be_lun->vn = nd.ni_vp; 2188 2189 /* We only support disks and files. */ 2190 if (vn_isdisk(be_lun->vn, &error)) { 2191 error = ctl_be_block_open_dev(be_lun, req); 2192 } else if (be_lun->vn->v_type == VREG) { 2193 error = ctl_be_block_open_file(be_lun, req); 2194 } else { 2195 error = EINVAL; 2196 snprintf(req->error_str, sizeof(req->error_str), 2197 "%s is not a disk or plain file", be_lun->dev_path); 2198 } 2199 VOP_UNLOCK(be_lun->vn, 0); 2200 2201 if (error != 0) 2202 ctl_be_block_close(be_lun); 2203 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; 2204 if (be_lun->dispatch != ctl_be_block_dispatch_dev) 2205 cbe_lun->serseq = CTL_LUN_SERSEQ_READ; 2206 value = ctl_get_opt(&cbe_lun->options, "serseq"); 2207 if (value != NULL && strcmp(value, "on") == 0) 2208 cbe_lun->serseq = CTL_LUN_SERSEQ_ON; 2209 else if (value != NULL && strcmp(value, "read") == 0) 2210 cbe_lun->serseq = CTL_LUN_SERSEQ_READ; 2211 else if (value != NULL && strcmp(value, "off") == 0) 2212 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF; 2213 return (0); 2214 } 2215 2216 static int 2217 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2218 { 2219 struct ctl_be_lun *cbe_lun; 2220 struct ctl_be_block_lun *be_lun; 2221 struct ctl_lun_create_params *params; 2222 char num_thread_str[16]; 2223 char tmpstr[32]; 2224 char *value; 2225 int retval, num_threads; 2226 int tmp_num_threads; 2227 2228 params = &req->reqdata.create; 2229 retval = 0; 2230 req->status = CTL_LUN_OK; 2231 2232 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 2233 cbe_lun = &be_lun->cbe_lun; 2234 cbe_lun->be_lun = be_lun; 2235 be_lun->params = req->reqdata.create; 2236 be_lun->softc = softc; 2237 STAILQ_INIT(&be_lun->input_queue); 2238 STAILQ_INIT(&be_lun->config_read_queue); 2239 STAILQ_INIT(&be_lun->config_write_queue); 2240 STAILQ_INIT(&be_lun->datamove_queue); 2241 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 2242 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 2243 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 2244 ctl_init_opts(&cbe_lun->options, 2245 req->num_be_args, req->kern_be_args); 2246 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 2247 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 2248 if (be_lun->lun_zone == NULL) { 2249 snprintf(req->error_str, sizeof(req->error_str), 2250 "error allocating UMA zone"); 2251 goto bailout_error; 2252 } 2253 2254 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 2255 cbe_lun->lun_type = params->device_type; 2256 else 2257 cbe_lun->lun_type = T_DIRECT; 2258 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 2259 cbe_lun->flags = 0; 2260 value = ctl_get_opt(&cbe_lun->options, "ha_role"); 2261 if (value != NULL) { 2262 if (strcmp(value, "primary") == 0) 2263 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2264 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) 2265 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2266 2267 if (cbe_lun->lun_type == T_DIRECT || 2268 cbe_lun->lun_type == T_CDROM) { 2269 be_lun->size_bytes = params->lun_size_bytes; 2270 if (params->blocksize_bytes != 0) 2271 cbe_lun->blocksize = params->blocksize_bytes; 2272 else if (cbe_lun->lun_type == T_CDROM) 2273 cbe_lun->blocksize = 2048; 2274 else 2275 cbe_lun->blocksize = 512; 2276 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize; 2277 cbe_lun->maxlba = (be_lun->size_blocks == 0) ? 2278 0 : (be_lun->size_blocks - 1); 2279 2280 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || 2281 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { 2282 retval = ctl_be_block_open(be_lun, req); 2283 if (retval != 0) { 2284 retval = 0; 2285 req->status = CTL_LUN_WARNING; 2286 } 2287 } 2288 num_threads = cbb_num_threads; 2289 } else { 2290 num_threads = 1; 2291 } 2292 2293 value = ctl_get_opt(&cbe_lun->options, "num_threads"); 2294 if (value != NULL) { 2295 tmp_num_threads = strtol(value, NULL, 0); 2296 2297 /* 2298 * We don't let the user specify less than one 2299 * thread, but hope he's clueful enough not to 2300 * specify 1000 threads. 2301 */ 2302 if (tmp_num_threads < 1) { 2303 snprintf(req->error_str, sizeof(req->error_str), 2304 "invalid number of threads %s", 2305 num_thread_str); 2306 goto bailout_error; 2307 } 2308 num_threads = tmp_num_threads; 2309 } 2310 2311 if (be_lun->vn == NULL) 2312 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2313 /* Tell the user the blocksize we ended up using */ 2314 params->lun_size_bytes = be_lun->size_bytes; 2315 params->blocksize_bytes = cbe_lun->blocksize; 2316 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2317 cbe_lun->req_lun_id = params->req_lun_id; 2318 cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ; 2319 } else 2320 cbe_lun->req_lun_id = 0; 2321 2322 cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown; 2323 cbe_lun->lun_config_status = ctl_be_block_lun_config_status; 2324 cbe_lun->be = &ctl_be_block_driver; 2325 2326 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2327 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 2328 softc->num_luns); 2329 strncpy((char *)cbe_lun->serial_num, tmpstr, 2330 MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr))); 2331 2332 /* Tell the user what we used for a serial number */ 2333 strncpy((char *)params->serial_num, tmpstr, 2334 MIN(sizeof(params->serial_num), sizeof(tmpstr))); 2335 } else { 2336 strncpy((char *)cbe_lun->serial_num, params->serial_num, 2337 MIN(sizeof(cbe_lun->serial_num), 2338 sizeof(params->serial_num))); 2339 } 2340 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2341 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2342 strncpy((char *)cbe_lun->device_id, tmpstr, 2343 MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr))); 2344 2345 /* Tell the user what we used for a device ID */ 2346 strncpy((char *)params->device_id, tmpstr, 2347 MIN(sizeof(params->device_id), sizeof(tmpstr))); 2348 } else { 2349 strncpy((char *)cbe_lun->device_id, params->device_id, 2350 MIN(sizeof(cbe_lun->device_id), 2351 sizeof(params->device_id))); 2352 } 2353 2354 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2355 2356 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2357 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2358 2359 if (be_lun->io_taskqueue == NULL) { 2360 snprintf(req->error_str, sizeof(req->error_str), 2361 "unable to create taskqueue"); 2362 goto bailout_error; 2363 } 2364 2365 /* 2366 * Note that we start the same number of threads by default for 2367 * both the file case and the block device case. For the file 2368 * case, we need multiple threads to allow concurrency, because the 2369 * vnode interface is designed to be a blocking interface. For the 2370 * block device case, ZFS zvols at least will block the caller's 2371 * context in many instances, and so we need multiple threads to 2372 * overcome that problem. Other block devices don't need as many 2373 * threads, but they shouldn't cause too many problems. 2374 * 2375 * If the user wants to just have a single thread for a block 2376 * device, he can specify that when the LUN is created, or change 2377 * the tunable/sysctl to alter the default number of threads. 2378 */ 2379 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2380 /*num threads*/num_threads, 2381 /*priority*/PWAIT, 2382 /*thread name*/ 2383 "%s taskq", be_lun->lunname); 2384 2385 if (retval != 0) 2386 goto bailout_error; 2387 2388 be_lun->num_threads = num_threads; 2389 2390 mtx_lock(&softc->lock); 2391 softc->num_luns++; 2392 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2393 2394 mtx_unlock(&softc->lock); 2395 2396 retval = ctl_add_lun(&be_lun->cbe_lun); 2397 if (retval != 0) { 2398 mtx_lock(&softc->lock); 2399 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2400 links); 2401 softc->num_luns--; 2402 mtx_unlock(&softc->lock); 2403 snprintf(req->error_str, sizeof(req->error_str), 2404 "ctl_add_lun() returned error %d, see dmesg for " 2405 "details", retval); 2406 retval = 0; 2407 goto bailout_error; 2408 } 2409 2410 mtx_lock(&softc->lock); 2411 2412 /* 2413 * Tell the config_status routine that we're waiting so it won't 2414 * clean up the LUN in the event of an error. 2415 */ 2416 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2417 2418 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2419 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2420 if (retval == EINTR) 2421 break; 2422 } 2423 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2424 2425 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2426 snprintf(req->error_str, sizeof(req->error_str), 2427 "LUN configuration error, see dmesg for details"); 2428 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2429 links); 2430 softc->num_luns--; 2431 mtx_unlock(&softc->lock); 2432 goto bailout_error; 2433 } else { 2434 params->req_lun_id = cbe_lun->lun_id; 2435 } 2436 2437 mtx_unlock(&softc->lock); 2438 2439 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2440 cbe_lun->blocksize, 2441 DEVSTAT_ALL_SUPPORTED, 2442 cbe_lun->lun_type 2443 | DEVSTAT_TYPE_IF_OTHER, 2444 DEVSTAT_PRIORITY_OTHER); 2445 2446 return (retval); 2447 2448 bailout_error: 2449 req->status = CTL_LUN_ERROR; 2450 2451 if (be_lun->io_taskqueue != NULL) 2452 taskqueue_free(be_lun->io_taskqueue); 2453 ctl_be_block_close(be_lun); 2454 if (be_lun->dev_path != NULL) 2455 free(be_lun->dev_path, M_CTLBLK); 2456 if (be_lun->lun_zone != NULL) 2457 uma_zdestroy(be_lun->lun_zone); 2458 ctl_free_opts(&cbe_lun->options); 2459 mtx_destroy(&be_lun->queue_lock); 2460 mtx_destroy(&be_lun->io_lock); 2461 free(be_lun, M_CTLBLK); 2462 2463 return (retval); 2464 } 2465 2466 static int 2467 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2468 { 2469 struct ctl_lun_rm_params *params; 2470 struct ctl_be_block_lun *be_lun; 2471 struct ctl_be_lun *cbe_lun; 2472 int retval; 2473 2474 params = &req->reqdata.rm; 2475 2476 mtx_lock(&softc->lock); 2477 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2478 if (be_lun->cbe_lun.lun_id == params->lun_id) 2479 break; 2480 } 2481 mtx_unlock(&softc->lock); 2482 if (be_lun == NULL) { 2483 snprintf(req->error_str, sizeof(req->error_str), 2484 "LUN %u is not managed by the block backend", 2485 params->lun_id); 2486 goto bailout_error; 2487 } 2488 cbe_lun = &be_lun->cbe_lun; 2489 2490 retval = ctl_disable_lun(cbe_lun); 2491 if (retval != 0) { 2492 snprintf(req->error_str, sizeof(req->error_str), 2493 "error %d returned from ctl_disable_lun() for " 2494 "LUN %d", retval, params->lun_id); 2495 goto bailout_error; 2496 } 2497 2498 if (be_lun->vn != NULL) { 2499 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2500 ctl_lun_no_media(cbe_lun); 2501 taskqueue_drain_all(be_lun->io_taskqueue); 2502 ctl_be_block_close(be_lun); 2503 } 2504 2505 retval = ctl_invalidate_lun(cbe_lun); 2506 if (retval != 0) { 2507 snprintf(req->error_str, sizeof(req->error_str), 2508 "error %d returned from ctl_invalidate_lun() for " 2509 "LUN %d", retval, params->lun_id); 2510 goto bailout_error; 2511 } 2512 2513 mtx_lock(&softc->lock); 2514 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2515 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2516 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2517 if (retval == EINTR) 2518 break; 2519 } 2520 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2521 2522 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2523 snprintf(req->error_str, sizeof(req->error_str), 2524 "interrupted waiting for LUN to be freed"); 2525 mtx_unlock(&softc->lock); 2526 goto bailout_error; 2527 } 2528 2529 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2530 2531 softc->num_luns--; 2532 mtx_unlock(&softc->lock); 2533 2534 taskqueue_drain_all(be_lun->io_taskqueue); 2535 taskqueue_free(be_lun->io_taskqueue); 2536 2537 if (be_lun->disk_stats != NULL) 2538 devstat_remove_entry(be_lun->disk_stats); 2539 2540 uma_zdestroy(be_lun->lun_zone); 2541 2542 ctl_free_opts(&cbe_lun->options); 2543 free(be_lun->dev_path, M_CTLBLK); 2544 mtx_destroy(&be_lun->queue_lock); 2545 mtx_destroy(&be_lun->io_lock); 2546 free(be_lun, M_CTLBLK); 2547 2548 req->status = CTL_LUN_OK; 2549 return (0); 2550 2551 bailout_error: 2552 req->status = CTL_LUN_ERROR; 2553 return (0); 2554 } 2555 2556 static int 2557 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2558 { 2559 struct ctl_lun_modify_params *params; 2560 struct ctl_be_block_lun *be_lun; 2561 struct ctl_be_lun *cbe_lun; 2562 char *value; 2563 uint64_t oldsize; 2564 int error, wasprim; 2565 2566 params = &req->reqdata.modify; 2567 2568 mtx_lock(&softc->lock); 2569 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2570 if (be_lun->cbe_lun.lun_id == params->lun_id) 2571 break; 2572 } 2573 mtx_unlock(&softc->lock); 2574 if (be_lun == NULL) { 2575 snprintf(req->error_str, sizeof(req->error_str), 2576 "LUN %u is not managed by the block backend", 2577 params->lun_id); 2578 goto bailout_error; 2579 } 2580 cbe_lun = &be_lun->cbe_lun; 2581 2582 if (params->lun_size_bytes != 0) 2583 be_lun->params.lun_size_bytes = params->lun_size_bytes; 2584 ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args); 2585 2586 wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY); 2587 value = ctl_get_opt(&cbe_lun->options, "ha_role"); 2588 if (value != NULL) { 2589 if (strcmp(value, "primary") == 0) 2590 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2591 else 2592 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; 2593 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF) 2594 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY; 2595 else 2596 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY; 2597 if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) { 2598 if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) 2599 ctl_lun_primary(cbe_lun); 2600 else 2601 ctl_lun_secondary(cbe_lun); 2602 } 2603 2604 oldsize = be_lun->size_blocks; 2605 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) || 2606 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) { 2607 if (be_lun->vn == NULL) 2608 error = ctl_be_block_open(be_lun, req); 2609 else if (vn_isdisk(be_lun->vn, &error)) 2610 error = ctl_be_block_open_dev(be_lun, req); 2611 else if (be_lun->vn->v_type == VREG) { 2612 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2613 error = ctl_be_block_open_file(be_lun, req); 2614 VOP_UNLOCK(be_lun->vn, 0); 2615 } else 2616 error = EINVAL; 2617 if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) && 2618 be_lun->vn != NULL) { 2619 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; 2620 ctl_lun_has_media(cbe_lun); 2621 } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 && 2622 be_lun->vn == NULL) { 2623 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2624 ctl_lun_no_media(cbe_lun); 2625 } 2626 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; 2627 } else { 2628 if (be_lun->vn != NULL) { 2629 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2630 ctl_lun_no_media(cbe_lun); 2631 taskqueue_drain_all(be_lun->io_taskqueue); 2632 error = ctl_be_block_close(be_lun); 2633 } else 2634 error = 0; 2635 } 2636 if (be_lun->size_blocks != oldsize) 2637 ctl_lun_capacity_changed(cbe_lun); 2638 2639 /* Tell the user the exact size we ended up using */ 2640 params->lun_size_bytes = be_lun->size_bytes; 2641 2642 req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; 2643 return (0); 2644 2645 bailout_error: 2646 req->status = CTL_LUN_ERROR; 2647 return (0); 2648 } 2649 2650 static void 2651 ctl_be_block_lun_shutdown(void *be_lun) 2652 { 2653 struct ctl_be_block_lun *lun; 2654 struct ctl_be_block_softc *softc; 2655 2656 lun = (struct ctl_be_block_lun *)be_lun; 2657 softc = lun->softc; 2658 2659 mtx_lock(&softc->lock); 2660 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2661 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2662 wakeup(lun); 2663 mtx_unlock(&softc->lock); 2664 } 2665 2666 static void 2667 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2668 { 2669 struct ctl_be_block_lun *lun; 2670 struct ctl_be_block_softc *softc; 2671 2672 lun = (struct ctl_be_block_lun *)be_lun; 2673 softc = lun->softc; 2674 2675 if (status == CTL_LUN_CONFIG_OK) { 2676 mtx_lock(&softc->lock); 2677 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2678 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2679 wakeup(lun); 2680 mtx_unlock(&softc->lock); 2681 2682 /* 2683 * We successfully added the LUN, attempt to enable it. 2684 */ 2685 if (ctl_enable_lun(&lun->cbe_lun) != 0) { 2686 printf("%s: ctl_enable_lun() failed!\n", __func__); 2687 if (ctl_invalidate_lun(&lun->cbe_lun) != 0) { 2688 printf("%s: ctl_invalidate_lun() failed!\n", 2689 __func__); 2690 } 2691 } 2692 2693 return; 2694 } 2695 2696 2697 mtx_lock(&softc->lock); 2698 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2699 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2700 wakeup(lun); 2701 mtx_unlock(&softc->lock); 2702 } 2703 2704 2705 static int 2706 ctl_be_block_config_write(union ctl_io *io) 2707 { 2708 struct ctl_be_block_lun *be_lun; 2709 struct ctl_be_lun *cbe_lun; 2710 int retval; 2711 2712 DPRINTF("entered\n"); 2713 2714 cbe_lun = CTL_BACKEND_LUN(io); 2715 be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; 2716 2717 retval = 0; 2718 switch (io->scsiio.cdb[0]) { 2719 case SYNCHRONIZE_CACHE: 2720 case SYNCHRONIZE_CACHE_16: 2721 case WRITE_SAME_10: 2722 case WRITE_SAME_16: 2723 case UNMAP: 2724 /* 2725 * The upper level CTL code will filter out any CDBs with 2726 * the immediate bit set and return the proper error. 2727 * 2728 * We don't really need to worry about what LBA range the 2729 * user asked to be synced out. When they issue a sync 2730 * cache command, we'll sync out the whole thing. 2731 */ 2732 mtx_lock(&be_lun->queue_lock); 2733 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2734 links); 2735 mtx_unlock(&be_lun->queue_lock); 2736 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2737 break; 2738 case START_STOP_UNIT: { 2739 struct scsi_start_stop_unit *cdb; 2740 struct ctl_lun_req req; 2741 2742 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2743 if ((cdb->how & SSS_PC_MASK) != 0) { 2744 ctl_set_success(&io->scsiio); 2745 ctl_config_write_done(io); 2746 break; 2747 } 2748 if (cdb->how & SSS_START) { 2749 if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) { 2750 retval = ctl_be_block_open(be_lun, &req); 2751 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED; 2752 if (retval == 0) { 2753 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA; 2754 ctl_lun_has_media(cbe_lun); 2755 } else { 2756 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2757 ctl_lun_no_media(cbe_lun); 2758 } 2759 } 2760 ctl_start_lun(cbe_lun); 2761 } else { 2762 ctl_stop_lun(cbe_lun); 2763 if (cdb->how & SSS_LOEJ) { 2764 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA; 2765 cbe_lun->flags |= CTL_LUN_FLAG_EJECTED; 2766 ctl_lun_ejected(cbe_lun); 2767 if (be_lun->vn != NULL) 2768 ctl_be_block_close(be_lun); 2769 } 2770 } 2771 2772 ctl_set_success(&io->scsiio); 2773 ctl_config_write_done(io); 2774 break; 2775 } 2776 case PREVENT_ALLOW: 2777 ctl_set_success(&io->scsiio); 2778 ctl_config_write_done(io); 2779 break; 2780 default: 2781 ctl_set_invalid_opcode(&io->scsiio); 2782 ctl_config_write_done(io); 2783 retval = CTL_RETVAL_COMPLETE; 2784 break; 2785 } 2786 2787 return (retval); 2788 } 2789 2790 static int 2791 ctl_be_block_config_read(union ctl_io *io) 2792 { 2793 struct ctl_be_block_lun *be_lun; 2794 struct ctl_be_lun *cbe_lun; 2795 int retval = 0; 2796 2797 DPRINTF("entered\n"); 2798 2799 cbe_lun = CTL_BACKEND_LUN(io); 2800 be_lun = (struct ctl_be_block_lun *)cbe_lun->be_lun; 2801 2802 switch (io->scsiio.cdb[0]) { 2803 case SERVICE_ACTION_IN: 2804 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) { 2805 mtx_lock(&be_lun->queue_lock); 2806 STAILQ_INSERT_TAIL(&be_lun->config_read_queue, 2807 &io->io_hdr, links); 2808 mtx_unlock(&be_lun->queue_lock); 2809 taskqueue_enqueue(be_lun->io_taskqueue, 2810 &be_lun->io_task); 2811 retval = CTL_RETVAL_QUEUED; 2812 break; 2813 } 2814 ctl_set_invalid_field(&io->scsiio, 2815 /*sks_valid*/ 1, 2816 /*command*/ 1, 2817 /*field*/ 1, 2818 /*bit_valid*/ 1, 2819 /*bit*/ 4); 2820 ctl_config_read_done(io); 2821 retval = CTL_RETVAL_COMPLETE; 2822 break; 2823 default: 2824 ctl_set_invalid_opcode(&io->scsiio); 2825 ctl_config_read_done(io); 2826 retval = CTL_RETVAL_COMPLETE; 2827 break; 2828 } 2829 2830 return (retval); 2831 } 2832 2833 static int 2834 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2835 { 2836 struct ctl_be_block_lun *lun; 2837 int retval; 2838 2839 lun = (struct ctl_be_block_lun *)be_lun; 2840 2841 retval = sbuf_printf(sb, "\t<num_threads>"); 2842 if (retval != 0) 2843 goto bailout; 2844 retval = sbuf_printf(sb, "%d", lun->num_threads); 2845 if (retval != 0) 2846 goto bailout; 2847 retval = sbuf_printf(sb, "</num_threads>\n"); 2848 2849 bailout: 2850 return (retval); 2851 } 2852 2853 static uint64_t 2854 ctl_be_block_lun_attr(void *be_lun, const char *attrname) 2855 { 2856 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)be_lun; 2857 2858 if (lun->getattr == NULL) 2859 return (UINT64_MAX); 2860 return (lun->getattr(lun, attrname)); 2861 } 2862 2863 static int 2864 ctl_be_block_init(void) 2865 { 2866 struct ctl_be_block_softc *softc = &backend_block_softc; 2867 2868 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2869 softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2870 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2871 STAILQ_INIT(&softc->lun_list); 2872 return (0); 2873 } 2874 2875 2876 static int 2877 ctl_be_block_shutdown(void) 2878 { 2879 struct ctl_be_block_softc *softc = &backend_block_softc; 2880 struct ctl_be_block_lun *lun, *next_lun; 2881 2882 mtx_lock(&softc->lock); 2883 STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) { 2884 /* 2885 * Drop our lock here. Since ctl_invalidate_lun() can call 2886 * back into us, this could potentially lead to a recursive 2887 * lock of the same mutex, which would cause a hang. 2888 */ 2889 mtx_unlock(&softc->lock); 2890 ctl_disable_lun(&lun->cbe_lun); 2891 ctl_invalidate_lun(&lun->cbe_lun); 2892 mtx_lock(&softc->lock); 2893 } 2894 mtx_unlock(&softc->lock); 2895 2896 uma_zdestroy(softc->beio_zone); 2897 mtx_destroy(&softc->lock); 2898 return (0); 2899 } 2900