1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37 /* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/types.h> 49 #include <sys/kthread.h> 50 #include <sys/bio.h> 51 #include <sys/fcntl.h> 52 #include <sys/limits.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/condvar.h> 56 #include <sys/malloc.h> 57 #include <sys/conf.h> 58 #include <sys/ioccom.h> 59 #include <sys/queue.h> 60 #include <sys/sbuf.h> 61 #include <sys/endian.h> 62 #include <sys/uio.h> 63 #include <sys/buf.h> 64 #include <sys/taskqueue.h> 65 #include <sys/vnode.h> 66 #include <sys/namei.h> 67 #include <sys/mount.h> 68 #include <sys/disk.h> 69 #include <sys/fcntl.h> 70 #include <sys/filedesc.h> 71 #include <sys/proc.h> 72 #include <sys/pcpu.h> 73 #include <sys/module.h> 74 #include <sys/sdt.h> 75 #include <sys/devicestat.h> 76 #include <sys/sysctl.h> 77 78 #include <geom/geom.h> 79 80 #include <cam/cam.h> 81 #include <cam/scsi/scsi_all.h> 82 #include <cam/scsi/scsi_da.h> 83 #include <cam/ctl/ctl_io.h> 84 #include <cam/ctl/ctl.h> 85 #include <cam/ctl/ctl_backend.h> 86 #include <cam/ctl/ctl_frontend_internal.h> 87 #include <cam/ctl/ctl_ioctl.h> 88 #include <cam/ctl/ctl_scsi_all.h> 89 #include <cam/ctl/ctl_error.h> 90 91 /* 92 * The idea here is that we'll allocate enough S/G space to hold a 1MB 93 * I/O. If we get an I/O larger than that, we'll split it. 94 */ 95 #define CTLBLK_HALF_IO_SIZE (512 * 1024) 96 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 97 #define CTLBLK_MAX_SEG MAXPHYS 98 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 99 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 100 101 #ifdef CTLBLK_DEBUG 102 #define DPRINTF(fmt, args...) \ 103 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 104 #else 105 #define DPRINTF(fmt, args...) do {} while(0) 106 #endif 107 108 #define PRIV(io) \ 109 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 110 #define ARGS(io) \ 111 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 112 113 SDT_PROVIDER_DEFINE(cbb); 114 115 typedef enum { 116 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 117 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 118 CTL_BE_BLOCK_LUN_WAITING = 0x04, 119 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 120 } ctl_be_block_lun_flags; 121 122 typedef enum { 123 CTL_BE_BLOCK_NONE, 124 CTL_BE_BLOCK_DEV, 125 CTL_BE_BLOCK_FILE 126 } ctl_be_block_type; 127 128 struct ctl_be_block_devdata { 129 struct cdev *cdev; 130 struct cdevsw *csw; 131 int dev_ref; 132 }; 133 134 struct ctl_be_block_filedata { 135 struct ucred *cred; 136 }; 137 138 union ctl_be_block_bedata { 139 struct ctl_be_block_devdata dev; 140 struct ctl_be_block_filedata file; 141 }; 142 143 struct ctl_be_block_io; 144 struct ctl_be_block_lun; 145 146 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 147 struct ctl_be_block_io *beio); 148 149 /* 150 * Backend LUN structure. There is a 1:1 mapping between a block device 151 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 152 */ 153 struct ctl_be_block_lun { 154 struct ctl_block_disk *disk; 155 char lunname[32]; 156 char *dev_path; 157 ctl_be_block_type dev_type; 158 struct vnode *vn; 159 union ctl_be_block_bedata backend; 160 cbb_dispatch_t dispatch; 161 cbb_dispatch_t lun_flush; 162 cbb_dispatch_t unmap; 163 uma_zone_t lun_zone; 164 uint64_t size_blocks; 165 uint64_t size_bytes; 166 uint32_t blocksize; 167 int blocksize_shift; 168 uint16_t pblockexp; 169 uint16_t pblockoff; 170 struct ctl_be_block_softc *softc; 171 struct devstat *disk_stats; 172 ctl_be_block_lun_flags flags; 173 STAILQ_ENTRY(ctl_be_block_lun) links; 174 struct ctl_be_lun ctl_be_lun; 175 struct taskqueue *io_taskqueue; 176 struct task io_task; 177 int num_threads; 178 STAILQ_HEAD(, ctl_io_hdr) input_queue; 179 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 180 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 181 struct mtx_padalign io_lock; 182 struct mtx_padalign queue_lock; 183 }; 184 185 /* 186 * Overall softc structure for the block backend module. 187 */ 188 struct ctl_be_block_softc { 189 struct mtx lock; 190 int num_disks; 191 STAILQ_HEAD(, ctl_block_disk) disk_list; 192 int num_luns; 193 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 194 }; 195 196 static struct ctl_be_block_softc backend_block_softc; 197 198 /* 199 * Per-I/O information. 200 */ 201 struct ctl_be_block_io { 202 union ctl_io *io; 203 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 204 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 205 int bio_cmd; 206 int bio_flags; 207 int num_segs; 208 int num_bios_sent; 209 int num_bios_done; 210 int send_complete; 211 int num_errors; 212 struct bintime ds_t0; 213 devstat_tag_type ds_tag_type; 214 devstat_trans_flags ds_trans_type; 215 uint64_t io_len; 216 uint64_t io_offset; 217 struct ctl_be_block_softc *softc; 218 struct ctl_be_block_lun *lun; 219 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 220 }; 221 222 static int cbb_num_threads = 14; 223 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 224 "CAM Target Layer Block Backend"); 225 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, 226 &cbb_num_threads, 0, "Number of threads per backing file"); 227 228 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 229 static void ctl_free_beio(struct ctl_be_block_io *beio); 230 static void ctl_complete_beio(struct ctl_be_block_io *beio); 231 static int ctl_be_block_move_done(union ctl_io *io); 232 static void ctl_be_block_biodone(struct bio *bio); 233 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 234 struct ctl_be_block_io *beio); 235 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 236 struct ctl_be_block_io *beio); 237 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 238 struct ctl_be_block_io *beio); 239 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 244 union ctl_io *io); 245 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 246 union ctl_io *io); 247 static void ctl_be_block_worker(void *context, int pending); 248 static int ctl_be_block_submit(union ctl_io *io); 249 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 250 int flag, struct thread *td); 251 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 252 struct ctl_lun_req *req); 253 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 254 struct ctl_lun_req *req); 255 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 256 static int ctl_be_block_open(struct ctl_be_block_softc *softc, 257 struct ctl_be_block_lun *be_lun, 258 struct ctl_lun_req *req); 259 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 260 struct ctl_lun_req *req); 261 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 262 struct ctl_lun_req *req); 263 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 264 struct ctl_lun_req *req); 265 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 266 struct ctl_lun_req *req); 267 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 268 struct ctl_lun_req *req); 269 static void ctl_be_block_lun_shutdown(void *be_lun); 270 static void ctl_be_block_lun_config_status(void *be_lun, 271 ctl_lun_config_status status); 272 static int ctl_be_block_config_write(union ctl_io *io); 273 static int ctl_be_block_config_read(union ctl_io *io); 274 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 275 int ctl_be_block_init(void); 276 277 static struct ctl_backend_driver ctl_be_block_driver = 278 { 279 .name = "block", 280 .flags = CTL_BE_FLAG_HAS_CONFIG, 281 .init = ctl_be_block_init, 282 .data_submit = ctl_be_block_submit, 283 .data_move_done = ctl_be_block_move_done, 284 .config_read = ctl_be_block_config_read, 285 .config_write = ctl_be_block_config_write, 286 .ioctl = ctl_be_block_ioctl, 287 .lun_info = ctl_be_block_lun_info 288 }; 289 290 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 291 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 292 293 static uma_zone_t beio_zone; 294 295 static struct ctl_be_block_io * 296 ctl_alloc_beio(struct ctl_be_block_softc *softc) 297 { 298 struct ctl_be_block_io *beio; 299 300 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 301 beio->softc = softc; 302 return (beio); 303 } 304 305 static void 306 ctl_free_beio(struct ctl_be_block_io *beio) 307 { 308 int duplicate_free; 309 int i; 310 311 duplicate_free = 0; 312 313 for (i = 0; i < beio->num_segs; i++) { 314 if (beio->sg_segs[i].addr == NULL) 315 duplicate_free++; 316 317 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 318 beio->sg_segs[i].addr = NULL; 319 320 /* For compare we had two equal S/G lists. */ 321 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 322 uma_zfree(beio->lun->lun_zone, 323 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 324 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 325 } 326 } 327 328 if (duplicate_free > 0) { 329 printf("%s: %d duplicate frees out of %d segments\n", __func__, 330 duplicate_free, beio->num_segs); 331 } 332 333 uma_zfree(beio_zone, beio); 334 } 335 336 static void 337 ctl_complete_beio(struct ctl_be_block_io *beio) 338 { 339 union ctl_io *io = beio->io; 340 341 if (beio->beio_cont != NULL) { 342 beio->beio_cont(beio); 343 } else { 344 ctl_free_beio(beio); 345 ctl_data_submit_done(io); 346 } 347 } 348 349 static int 350 ctl_be_block_move_done(union ctl_io *io) 351 { 352 struct ctl_be_block_io *beio; 353 struct ctl_be_block_lun *be_lun; 354 struct ctl_lba_len_flags *lbalen; 355 #ifdef CTL_TIME_IO 356 struct bintime cur_bt; 357 #endif 358 int i; 359 360 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 361 be_lun = beio->lun; 362 363 DPRINTF("entered\n"); 364 365 #ifdef CTL_TIME_IO 366 getbintime(&cur_bt); 367 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 368 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 369 io->io_hdr.num_dmas++; 370 #endif 371 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 372 373 /* 374 * We set status at this point for read commands, and write 375 * commands with errors. 376 */ 377 if ((io->io_hdr.port_status == 0) && 378 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 379 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 380 lbalen = ARGS(beio->io); 381 if (lbalen->flags & CTL_LLF_READ) { 382 ctl_set_success(&io->scsiio); 383 } else if (lbalen->flags & CTL_LLF_COMPARE) { 384 /* We have two data blocks ready for comparison. */ 385 for (i = 0; i < beio->num_segs; i++) { 386 if (memcmp(beio->sg_segs[i].addr, 387 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 388 beio->sg_segs[i].len) != 0) 389 break; 390 } 391 if (i < beio->num_segs) 392 ctl_set_sense(&io->scsiio, 393 /*current_error*/ 1, 394 /*sense_key*/ SSD_KEY_MISCOMPARE, 395 /*asc*/ 0x1D, 396 /*ascq*/ 0x00, 397 SSD_ELEM_NONE); 398 else 399 ctl_set_success(&io->scsiio); 400 } 401 } 402 else if ((io->io_hdr.port_status != 0) 403 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 404 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 405 /* 406 * For hardware error sense keys, the sense key 407 * specific value is defined to be a retry count, 408 * but we use it to pass back an internal FETD 409 * error code. XXX KDM Hopefully the FETD is only 410 * using 16 bits for an error code, since that's 411 * all the space we have in the sks field. 412 */ 413 ctl_set_internal_failure(&io->scsiio, 414 /*sks_valid*/ 1, 415 /*retry_count*/ 416 io->io_hdr.port_status); 417 } 418 419 /* 420 * If this is a read, or a write with errors, it is done. 421 */ 422 if ((beio->bio_cmd == BIO_READ) 423 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 424 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 425 ctl_complete_beio(beio); 426 return (0); 427 } 428 429 /* 430 * At this point, we have a write and the DMA completed 431 * successfully. We now have to queue it to the task queue to 432 * execute the backend I/O. That is because we do blocking 433 * memory allocations, and in the file backing case, blocking I/O. 434 * This move done routine is generally called in the SIM's 435 * interrupt context, and therefore we cannot block. 436 */ 437 mtx_lock(&be_lun->queue_lock); 438 /* 439 * XXX KDM make sure that links is okay to use at this point. 440 * Otherwise, we either need to add another field to ctl_io_hdr, 441 * or deal with resource allocation here. 442 */ 443 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 444 mtx_unlock(&be_lun->queue_lock); 445 446 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 447 448 return (0); 449 } 450 451 static void 452 ctl_be_block_biodone(struct bio *bio) 453 { 454 struct ctl_be_block_io *beio; 455 struct ctl_be_block_lun *be_lun; 456 union ctl_io *io; 457 int error; 458 459 beio = bio->bio_caller1; 460 be_lun = beio->lun; 461 io = beio->io; 462 463 DPRINTF("entered\n"); 464 465 error = bio->bio_error; 466 mtx_lock(&be_lun->io_lock); 467 if (error != 0) 468 beio->num_errors++; 469 470 beio->num_bios_done++; 471 472 /* 473 * XXX KDM will this cause WITNESS to complain? Holding a lock 474 * during the free might cause it to complain. 475 */ 476 g_destroy_bio(bio); 477 478 /* 479 * If the send complete bit isn't set, or we aren't the last I/O to 480 * complete, then we're done. 481 */ 482 if ((beio->send_complete == 0) 483 || (beio->num_bios_done < beio->num_bios_sent)) { 484 mtx_unlock(&be_lun->io_lock); 485 return; 486 } 487 488 /* 489 * At this point, we've verified that we are the last I/O to 490 * complete, so it's safe to drop the lock. 491 */ 492 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 493 beio->ds_tag_type, beio->ds_trans_type, 494 /*now*/ NULL, /*then*/&beio->ds_t0); 495 mtx_unlock(&be_lun->io_lock); 496 497 /* 498 * If there are any errors from the backing device, we fail the 499 * entire I/O with a medium error. 500 */ 501 if (beio->num_errors > 0) { 502 if (error == EOPNOTSUPP) { 503 ctl_set_invalid_opcode(&io->scsiio); 504 } else if (beio->bio_cmd == BIO_FLUSH) { 505 /* XXX KDM is there is a better error here? */ 506 ctl_set_internal_failure(&io->scsiio, 507 /*sks_valid*/ 1, 508 /*retry_count*/ 0xbad2); 509 } else 510 ctl_set_medium_error(&io->scsiio); 511 ctl_complete_beio(beio); 512 return; 513 } 514 515 /* 516 * If this is a write, a flush, a delete or verify, we're all done. 517 * If this is a read, we can now send the data to the user. 518 */ 519 if ((beio->bio_cmd == BIO_WRITE) 520 || (beio->bio_cmd == BIO_FLUSH) 521 || (beio->bio_cmd == BIO_DELETE) 522 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 523 ctl_set_success(&io->scsiio); 524 ctl_complete_beio(beio); 525 } else { 526 #ifdef CTL_TIME_IO 527 getbintime(&io->io_hdr.dma_start_bt); 528 #endif 529 ctl_datamove(io); 530 } 531 } 532 533 static void 534 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 535 struct ctl_be_block_io *beio) 536 { 537 union ctl_io *io = beio->io; 538 struct mount *mountpoint; 539 int error, lock_flags; 540 541 DPRINTF("entered\n"); 542 543 binuptime(&beio->ds_t0); 544 mtx_lock(&be_lun->io_lock); 545 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 546 mtx_unlock(&be_lun->io_lock); 547 548 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 549 550 if (MNT_SHARED_WRITES(mountpoint) 551 || ((mountpoint == NULL) 552 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 553 lock_flags = LK_SHARED; 554 else 555 lock_flags = LK_EXCLUSIVE; 556 557 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 558 559 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 560 VOP_UNLOCK(be_lun->vn, 0); 561 562 vn_finished_write(mountpoint); 563 564 mtx_lock(&be_lun->io_lock); 565 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 566 beio->ds_tag_type, beio->ds_trans_type, 567 /*now*/ NULL, /*then*/&beio->ds_t0); 568 mtx_unlock(&be_lun->io_lock); 569 570 if (error == 0) 571 ctl_set_success(&io->scsiio); 572 else { 573 /* XXX KDM is there is a better error here? */ 574 ctl_set_internal_failure(&io->scsiio, 575 /*sks_valid*/ 1, 576 /*retry_count*/ 0xbad1); 577 } 578 579 ctl_complete_beio(beio); 580 } 581 582 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 583 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 584 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 585 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 586 587 static void 588 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 589 struct ctl_be_block_io *beio) 590 { 591 struct ctl_be_block_filedata *file_data; 592 union ctl_io *io; 593 struct uio xuio; 594 struct iovec *xiovec; 595 int flags; 596 int error, i; 597 598 DPRINTF("entered\n"); 599 600 file_data = &be_lun->backend.file; 601 io = beio->io; 602 flags = beio->bio_flags; 603 604 bzero(&xuio, sizeof(xuio)); 605 if (beio->bio_cmd == BIO_READ) { 606 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 607 xuio.uio_rw = UIO_READ; 608 } else { 609 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 610 xuio.uio_rw = UIO_WRITE; 611 } 612 xuio.uio_offset = beio->io_offset; 613 xuio.uio_resid = beio->io_len; 614 xuio.uio_segflg = UIO_SYSSPACE; 615 xuio.uio_iov = beio->xiovecs; 616 xuio.uio_iovcnt = beio->num_segs; 617 xuio.uio_td = curthread; 618 619 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 620 xiovec->iov_base = beio->sg_segs[i].addr; 621 xiovec->iov_len = beio->sg_segs[i].len; 622 } 623 624 binuptime(&beio->ds_t0); 625 mtx_lock(&be_lun->io_lock); 626 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 627 mtx_unlock(&be_lun->io_lock); 628 629 if (beio->bio_cmd == BIO_READ) { 630 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 631 632 /* 633 * UFS pays attention to IO_DIRECT for reads. If the 634 * DIRECTIO option is configured into the kernel, it calls 635 * ffs_rawread(). But that only works for single-segment 636 * uios with user space addresses. In our case, with a 637 * kernel uio, it still reads into the buffer cache, but it 638 * will just try to release the buffer from the cache later 639 * on in ffs_read(). 640 * 641 * ZFS does not pay attention to IO_DIRECT for reads. 642 * 643 * UFS does not pay attention to IO_SYNC for reads. 644 * 645 * ZFS pays attention to IO_SYNC (which translates into the 646 * Solaris define FRSYNC for zfs_read()) for reads. It 647 * attempts to sync the file before reading. 648 * 649 * So, to attempt to provide some barrier semantics in the 650 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 651 */ 652 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 653 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 654 655 VOP_UNLOCK(be_lun->vn, 0); 656 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 657 } else { 658 struct mount *mountpoint; 659 int lock_flags; 660 661 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 662 663 if (MNT_SHARED_WRITES(mountpoint) 664 || ((mountpoint == NULL) 665 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 666 lock_flags = LK_SHARED; 667 else 668 lock_flags = LK_EXCLUSIVE; 669 670 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 671 672 /* 673 * UFS pays attention to IO_DIRECT for writes. The write 674 * is done asynchronously. (Normally the write would just 675 * get put into cache. 676 * 677 * UFS pays attention to IO_SYNC for writes. It will 678 * attempt to write the buffer out synchronously if that 679 * flag is set. 680 * 681 * ZFS does not pay attention to IO_DIRECT for writes. 682 * 683 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 684 * for writes. It will flush the transaction from the 685 * cache before returning. 686 * 687 * So if we've got the BIO_ORDERED flag set, we want 688 * IO_SYNC in either the UFS or ZFS case. 689 */ 690 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 691 IO_SYNC : 0, file_data->cred); 692 VOP_UNLOCK(be_lun->vn, 0); 693 694 vn_finished_write(mountpoint); 695 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 696 } 697 698 mtx_lock(&be_lun->io_lock); 699 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 700 beio->ds_tag_type, beio->ds_trans_type, 701 /*now*/ NULL, /*then*/&beio->ds_t0); 702 mtx_unlock(&be_lun->io_lock); 703 704 /* 705 * If we got an error, set the sense data to "MEDIUM ERROR" and 706 * return the I/O to the user. 707 */ 708 if (error != 0) { 709 char path_str[32]; 710 711 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 712 /* 713 * XXX KDM ZFS returns ENOSPC when the underlying 714 * filesystem fills up. What kind of SCSI error should we 715 * return for that? 716 */ 717 printf("%s%s command returned errno %d\n", path_str, 718 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 719 ctl_set_medium_error(&io->scsiio); 720 ctl_complete_beio(beio); 721 return; 722 } 723 724 /* 725 * If this is a write or a verify, we're all done. 726 * If this is a read, we can now send the data to the user. 727 */ 728 if ((beio->bio_cmd == BIO_WRITE) || 729 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 730 ctl_set_success(&io->scsiio); 731 ctl_complete_beio(beio); 732 } else { 733 #ifdef CTL_TIME_IO 734 getbintime(&io->io_hdr.dma_start_bt); 735 #endif 736 ctl_datamove(io); 737 } 738 } 739 740 static void 741 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 742 struct ctl_be_block_io *beio) 743 { 744 struct ctl_be_block_devdata *dev_data; 745 union ctl_io *io; 746 struct uio xuio; 747 struct iovec *xiovec; 748 int flags; 749 int error, i; 750 751 DPRINTF("entered\n"); 752 753 dev_data = &be_lun->backend.dev; 754 io = beio->io; 755 flags = beio->bio_flags; 756 757 bzero(&xuio, sizeof(xuio)); 758 if (beio->bio_cmd == BIO_READ) { 759 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 760 xuio.uio_rw = UIO_READ; 761 } else { 762 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 763 xuio.uio_rw = UIO_WRITE; 764 } 765 xuio.uio_offset = beio->io_offset; 766 xuio.uio_resid = beio->io_len; 767 xuio.uio_segflg = UIO_SYSSPACE; 768 xuio.uio_iov = beio->xiovecs; 769 xuio.uio_iovcnt = beio->num_segs; 770 xuio.uio_td = curthread; 771 772 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 773 xiovec->iov_base = beio->sg_segs[i].addr; 774 xiovec->iov_len = beio->sg_segs[i].len; 775 } 776 777 binuptime(&beio->ds_t0); 778 mtx_lock(&be_lun->io_lock); 779 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 780 mtx_unlock(&be_lun->io_lock); 781 782 if (beio->bio_cmd == BIO_READ) { 783 error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, 0); 784 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 785 } else { 786 error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, 0); 787 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 788 } 789 790 mtx_lock(&be_lun->io_lock); 791 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 792 beio->ds_tag_type, beio->ds_trans_type, 793 /*now*/ NULL, /*then*/&beio->ds_t0); 794 mtx_unlock(&be_lun->io_lock); 795 796 /* 797 * If we got an error, set the sense data to "MEDIUM ERROR" and 798 * return the I/O to the user. 799 */ 800 if (error != 0) { 801 ctl_set_medium_error(&io->scsiio); 802 ctl_complete_beio(beio); 803 return; 804 } 805 806 /* 807 * If this is a write or a verify, we're all done. 808 * If this is a read, we can now send the data to the user. 809 */ 810 if ((beio->bio_cmd == BIO_WRITE) || 811 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 812 ctl_set_success(&io->scsiio); 813 ctl_complete_beio(beio); 814 } else { 815 #ifdef CTL_TIME_IO 816 getbintime(&io->io_hdr.dma_start_bt); 817 #endif 818 ctl_datamove(io); 819 } 820 } 821 822 static void 823 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 824 struct ctl_be_block_io *beio) 825 { 826 struct bio *bio; 827 union ctl_io *io; 828 struct ctl_be_block_devdata *dev_data; 829 830 dev_data = &be_lun->backend.dev; 831 io = beio->io; 832 833 DPRINTF("entered\n"); 834 835 /* This can't fail, it's a blocking allocation. */ 836 bio = g_alloc_bio(); 837 838 bio->bio_cmd = BIO_FLUSH; 839 bio->bio_flags |= BIO_ORDERED; 840 bio->bio_dev = dev_data->cdev; 841 bio->bio_offset = 0; 842 bio->bio_data = 0; 843 bio->bio_done = ctl_be_block_biodone; 844 bio->bio_caller1 = beio; 845 bio->bio_pblkno = 0; 846 847 /* 848 * We don't need to acquire the LUN lock here, because we are only 849 * sending one bio, and so there is no other context to synchronize 850 * with. 851 */ 852 beio->num_bios_sent = 1; 853 beio->send_complete = 1; 854 855 binuptime(&beio->ds_t0); 856 mtx_lock(&be_lun->io_lock); 857 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 858 mtx_unlock(&be_lun->io_lock); 859 860 (*dev_data->csw->d_strategy)(bio); 861 } 862 863 static void 864 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 865 struct ctl_be_block_io *beio, 866 uint64_t off, uint64_t len, int last) 867 { 868 struct bio *bio; 869 struct ctl_be_block_devdata *dev_data; 870 uint64_t maxlen; 871 872 dev_data = &be_lun->backend.dev; 873 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 874 while (len > 0) { 875 bio = g_alloc_bio(); 876 bio->bio_cmd = BIO_DELETE; 877 bio->bio_flags |= beio->bio_flags; 878 bio->bio_dev = dev_data->cdev; 879 bio->bio_offset = off; 880 bio->bio_length = MIN(len, maxlen); 881 bio->bio_data = 0; 882 bio->bio_done = ctl_be_block_biodone; 883 bio->bio_caller1 = beio; 884 bio->bio_pblkno = off / be_lun->blocksize; 885 886 off += bio->bio_length; 887 len -= bio->bio_length; 888 889 mtx_lock(&be_lun->io_lock); 890 beio->num_bios_sent++; 891 if (last && len == 0) 892 beio->send_complete = 1; 893 mtx_unlock(&be_lun->io_lock); 894 895 (*dev_data->csw->d_strategy)(bio); 896 } 897 } 898 899 static void 900 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 901 struct ctl_be_block_io *beio) 902 { 903 union ctl_io *io; 904 struct ctl_be_block_devdata *dev_data; 905 struct ctl_ptr_len_flags *ptrlen; 906 struct scsi_unmap_desc *buf, *end; 907 uint64_t len; 908 909 dev_data = &be_lun->backend.dev; 910 io = beio->io; 911 912 DPRINTF("entered\n"); 913 914 binuptime(&beio->ds_t0); 915 mtx_lock(&be_lun->io_lock); 916 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 917 mtx_unlock(&be_lun->io_lock); 918 919 if (beio->io_offset == -1) { 920 beio->io_len = 0; 921 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 922 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 923 end = buf + ptrlen->len / sizeof(*buf); 924 for (; buf < end; buf++) { 925 len = (uint64_t)scsi_4btoul(buf->length) * 926 be_lun->blocksize; 927 beio->io_len += len; 928 ctl_be_block_unmap_dev_range(be_lun, beio, 929 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 930 (end - buf < 2) ? TRUE : FALSE); 931 } 932 } else 933 ctl_be_block_unmap_dev_range(be_lun, beio, 934 beio->io_offset, beio->io_len, TRUE); 935 } 936 937 static void 938 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 939 struct ctl_be_block_io *beio) 940 { 941 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 942 int i; 943 struct bio *bio; 944 struct ctl_be_block_devdata *dev_data; 945 off_t cur_offset; 946 int max_iosize; 947 948 DPRINTF("entered\n"); 949 950 dev_data = &be_lun->backend.dev; 951 952 /* 953 * We have to limit our I/O size to the maximum supported by the 954 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 955 * set it properly, use DFLTPHYS. 956 */ 957 max_iosize = dev_data->cdev->si_iosize_max; 958 if (max_iosize < PAGE_SIZE) 959 max_iosize = DFLTPHYS; 960 961 cur_offset = beio->io_offset; 962 for (i = 0; i < beio->num_segs; i++) { 963 size_t cur_size; 964 uint8_t *cur_ptr; 965 966 cur_size = beio->sg_segs[i].len; 967 cur_ptr = beio->sg_segs[i].addr; 968 969 while (cur_size > 0) { 970 /* This can't fail, it's a blocking allocation. */ 971 bio = g_alloc_bio(); 972 973 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 974 975 bio->bio_cmd = beio->bio_cmd; 976 bio->bio_flags |= beio->bio_flags; 977 bio->bio_dev = dev_data->cdev; 978 bio->bio_caller1 = beio; 979 bio->bio_length = min(cur_size, max_iosize); 980 bio->bio_offset = cur_offset; 981 bio->bio_data = cur_ptr; 982 bio->bio_done = ctl_be_block_biodone; 983 bio->bio_pblkno = cur_offset / be_lun->blocksize; 984 985 cur_offset += bio->bio_length; 986 cur_ptr += bio->bio_length; 987 cur_size -= bio->bio_length; 988 989 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 990 beio->num_bios_sent++; 991 } 992 } 993 binuptime(&beio->ds_t0); 994 mtx_lock(&be_lun->io_lock); 995 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 996 beio->send_complete = 1; 997 mtx_unlock(&be_lun->io_lock); 998 999 /* 1000 * Fire off all allocated requests! 1001 */ 1002 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1003 TAILQ_REMOVE(&queue, bio, bio_queue); 1004 (*dev_data->csw->d_strategy)(bio); 1005 } 1006 } 1007 1008 static void 1009 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1010 { 1011 union ctl_io *io; 1012 1013 io = beio->io; 1014 ctl_free_beio(beio); 1015 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1016 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1017 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1018 ctl_config_write_done(io); 1019 return; 1020 } 1021 1022 ctl_be_block_config_write(io); 1023 } 1024 1025 static void 1026 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1027 union ctl_io *io) 1028 { 1029 struct ctl_be_block_io *beio; 1030 struct ctl_be_block_softc *softc; 1031 struct ctl_lba_len_flags *lbalen; 1032 uint64_t len_left, lba; 1033 int i, seglen; 1034 uint8_t *buf, *end; 1035 1036 DPRINTF("entered\n"); 1037 1038 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1039 softc = be_lun->softc; 1040 lbalen = ARGS(beio->io); 1041 1042 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP) || 1043 (lbalen->flags & SWS_UNMAP && be_lun->unmap == NULL)) { 1044 ctl_free_beio(beio); 1045 ctl_set_invalid_field(&io->scsiio, 1046 /*sks_valid*/ 1, 1047 /*command*/ 1, 1048 /*field*/ 1, 1049 /*bit_valid*/ 0, 1050 /*bit*/ 0); 1051 ctl_config_write_done(io); 1052 return; 1053 } 1054 1055 /* 1056 * If the I/O came down with an ordered or head of queue tag, set 1057 * the BIO_ORDERED attribute. For head of queue tags, that's 1058 * pretty much the best we can do. 1059 */ 1060 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1061 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1062 beio->bio_flags = BIO_ORDERED; 1063 1064 switch (io->scsiio.tag_type) { 1065 case CTL_TAG_ORDERED: 1066 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1067 break; 1068 case CTL_TAG_HEAD_OF_QUEUE: 1069 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1070 break; 1071 case CTL_TAG_UNTAGGED: 1072 case CTL_TAG_SIMPLE: 1073 case CTL_TAG_ACA: 1074 default: 1075 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1076 break; 1077 } 1078 1079 if (lbalen->flags & SWS_UNMAP) { 1080 beio->io_offset = lbalen->lba * be_lun->blocksize; 1081 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1082 beio->bio_cmd = BIO_DELETE; 1083 beio->ds_trans_type = DEVSTAT_FREE; 1084 1085 be_lun->unmap(be_lun, beio); 1086 return; 1087 } 1088 1089 beio->bio_cmd = BIO_WRITE; 1090 beio->ds_trans_type = DEVSTAT_WRITE; 1091 1092 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1093 (uintmax_t)lbalen->lba, lbalen->len); 1094 1095 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1096 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1097 1098 /* 1099 * Setup the S/G entry for this chunk. 1100 */ 1101 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1102 seglen -= seglen % be_lun->blocksize; 1103 beio->sg_segs[i].len = seglen; 1104 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1105 1106 DPRINTF("segment %d addr %p len %zd\n", i, 1107 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1108 1109 beio->num_segs++; 1110 len_left -= seglen; 1111 1112 buf = beio->sg_segs[i].addr; 1113 end = buf + seglen; 1114 for (; buf < end; buf += be_lun->blocksize) { 1115 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1116 if (lbalen->flags & SWS_LBDATA) 1117 scsi_ulto4b(lbalen->lba + lba, buf); 1118 lba++; 1119 } 1120 } 1121 1122 beio->io_offset = lbalen->lba * be_lun->blocksize; 1123 beio->io_len = lba * be_lun->blocksize; 1124 1125 /* We can not do all in one run. Correct and schedule rerun. */ 1126 if (len_left > 0) { 1127 lbalen->lba += lba; 1128 lbalen->len -= lba; 1129 beio->beio_cont = ctl_be_block_cw_done_ws; 1130 } 1131 1132 be_lun->dispatch(be_lun, beio); 1133 } 1134 1135 static void 1136 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1137 union ctl_io *io) 1138 { 1139 struct ctl_be_block_io *beio; 1140 struct ctl_be_block_softc *softc; 1141 struct ctl_ptr_len_flags *ptrlen; 1142 1143 DPRINTF("entered\n"); 1144 1145 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1146 softc = be_lun->softc; 1147 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1148 1149 if (ptrlen->flags != 0 || be_lun->unmap == NULL) { 1150 ctl_free_beio(beio); 1151 ctl_set_invalid_field(&io->scsiio, 1152 /*sks_valid*/ 0, 1153 /*command*/ 1, 1154 /*field*/ 0, 1155 /*bit_valid*/ 0, 1156 /*bit*/ 0); 1157 ctl_config_write_done(io); 1158 return; 1159 } 1160 1161 /* 1162 * If the I/O came down with an ordered or head of queue tag, set 1163 * the BIO_ORDERED attribute. For head of queue tags, that's 1164 * pretty much the best we can do. 1165 */ 1166 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1167 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1168 beio->bio_flags = BIO_ORDERED; 1169 1170 switch (io->scsiio.tag_type) { 1171 case CTL_TAG_ORDERED: 1172 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1173 break; 1174 case CTL_TAG_HEAD_OF_QUEUE: 1175 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1176 break; 1177 case CTL_TAG_UNTAGGED: 1178 case CTL_TAG_SIMPLE: 1179 case CTL_TAG_ACA: 1180 default: 1181 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1182 break; 1183 } 1184 1185 beio->io_len = 0; 1186 beio->io_offset = -1; 1187 1188 beio->bio_cmd = BIO_DELETE; 1189 beio->ds_trans_type = DEVSTAT_FREE; 1190 1191 DPRINTF("UNMAP\n"); 1192 1193 be_lun->unmap(be_lun, beio); 1194 } 1195 1196 static void 1197 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1198 { 1199 union ctl_io *io; 1200 1201 io = beio->io; 1202 ctl_free_beio(beio); 1203 ctl_config_write_done(io); 1204 } 1205 1206 static void 1207 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1208 union ctl_io *io) 1209 { 1210 struct ctl_be_block_io *beio; 1211 struct ctl_be_block_softc *softc; 1212 1213 DPRINTF("entered\n"); 1214 1215 softc = be_lun->softc; 1216 beio = ctl_alloc_beio(softc); 1217 beio->io = io; 1218 beio->lun = be_lun; 1219 beio->beio_cont = ctl_be_block_cw_done; 1220 PRIV(io)->ptr = (void *)beio; 1221 1222 switch (io->scsiio.cdb[0]) { 1223 case SYNCHRONIZE_CACHE: 1224 case SYNCHRONIZE_CACHE_16: 1225 beio->bio_cmd = BIO_FLUSH; 1226 beio->ds_trans_type = DEVSTAT_NO_DATA; 1227 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1228 beio->io_len = 0; 1229 be_lun->lun_flush(be_lun, beio); 1230 break; 1231 case WRITE_SAME_10: 1232 case WRITE_SAME_16: 1233 ctl_be_block_cw_dispatch_ws(be_lun, io); 1234 break; 1235 case UNMAP: 1236 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1237 break; 1238 default: 1239 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1240 break; 1241 } 1242 } 1243 1244 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1245 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1246 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1247 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1248 1249 static void 1250 ctl_be_block_next(struct ctl_be_block_io *beio) 1251 { 1252 struct ctl_be_block_lun *be_lun; 1253 union ctl_io *io; 1254 1255 io = beio->io; 1256 be_lun = beio->lun; 1257 ctl_free_beio(beio); 1258 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1259 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1260 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1261 ctl_data_submit_done(io); 1262 return; 1263 } 1264 1265 io->io_hdr.status &= ~CTL_STATUS_MASK; 1266 io->io_hdr.status |= CTL_STATUS_NONE; 1267 1268 mtx_lock(&be_lun->queue_lock); 1269 /* 1270 * XXX KDM make sure that links is okay to use at this point. 1271 * Otherwise, we either need to add another field to ctl_io_hdr, 1272 * or deal with resource allocation here. 1273 */ 1274 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1275 mtx_unlock(&be_lun->queue_lock); 1276 1277 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1278 } 1279 1280 static void 1281 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1282 union ctl_io *io) 1283 { 1284 struct ctl_be_block_io *beio; 1285 struct ctl_be_block_softc *softc; 1286 struct ctl_lba_len_flags *lbalen; 1287 struct ctl_ptr_len_flags *bptrlen; 1288 uint64_t len_left, lbas; 1289 int i; 1290 1291 softc = be_lun->softc; 1292 1293 DPRINTF("entered\n"); 1294 1295 lbalen = ARGS(io); 1296 if (lbalen->flags & CTL_LLF_WRITE) { 1297 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1298 } else { 1299 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1300 } 1301 1302 beio = ctl_alloc_beio(softc); 1303 beio->io = io; 1304 beio->lun = be_lun; 1305 bptrlen = PRIV(io); 1306 bptrlen->ptr = (void *)beio; 1307 1308 /* 1309 * If the I/O came down with an ordered or head of queue tag, set 1310 * the BIO_ORDERED attribute. For head of queue tags, that's 1311 * pretty much the best we can do. 1312 * 1313 * XXX KDM we don't have a great way to easily know about the FUA 1314 * bit right now (it is decoded in ctl_read_write(), but we don't 1315 * pass that knowledge to the backend), and in any case we would 1316 * need to determine how to handle it. 1317 */ 1318 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1319 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1320 beio->bio_flags = BIO_ORDERED; 1321 1322 switch (io->scsiio.tag_type) { 1323 case CTL_TAG_ORDERED: 1324 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1325 break; 1326 case CTL_TAG_HEAD_OF_QUEUE: 1327 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1328 break; 1329 case CTL_TAG_UNTAGGED: 1330 case CTL_TAG_SIMPLE: 1331 case CTL_TAG_ACA: 1332 default: 1333 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1334 break; 1335 } 1336 1337 if (lbalen->flags & CTL_LLF_WRITE) { 1338 beio->bio_cmd = BIO_WRITE; 1339 beio->ds_trans_type = DEVSTAT_WRITE; 1340 } else { 1341 beio->bio_cmd = BIO_READ; 1342 beio->ds_trans_type = DEVSTAT_READ; 1343 } 1344 1345 DPRINTF("%s at LBA %jx len %u @%ju\n", 1346 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1347 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1348 if (lbalen->flags & CTL_LLF_COMPARE) 1349 lbas = CTLBLK_HALF_IO_SIZE; 1350 else 1351 lbas = CTLBLK_MAX_IO_SIZE; 1352 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1353 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1354 beio->io_len = lbas * be_lun->blocksize; 1355 bptrlen->len += lbas; 1356 1357 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1358 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1359 i, CTLBLK_MAX_SEGS)); 1360 1361 /* 1362 * Setup the S/G entry for this chunk. 1363 */ 1364 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1365 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1366 1367 DPRINTF("segment %d addr %p len %zd\n", i, 1368 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1369 1370 /* Set up second segment for compare operation. */ 1371 if (lbalen->flags & CTL_LLF_COMPARE) { 1372 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1373 beio->sg_segs[i].len; 1374 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1375 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1376 } 1377 1378 beio->num_segs++; 1379 len_left -= beio->sg_segs[i].len; 1380 } 1381 if (bptrlen->len < lbalen->len) 1382 beio->beio_cont = ctl_be_block_next; 1383 io->scsiio.be_move_done = ctl_be_block_move_done; 1384 /* For compare we have separate S/G lists for read and datamove. */ 1385 if (lbalen->flags & CTL_LLF_COMPARE) 1386 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1387 else 1388 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1389 io->scsiio.kern_data_len = beio->io_len; 1390 io->scsiio.kern_data_resid = 0; 1391 io->scsiio.kern_sg_entries = beio->num_segs; 1392 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1393 1394 /* 1395 * For the read case, we need to read the data into our buffers and 1396 * then we can send it back to the user. For the write case, we 1397 * need to get the data from the user first. 1398 */ 1399 if (beio->bio_cmd == BIO_READ) { 1400 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1401 be_lun->dispatch(be_lun, beio); 1402 } else { 1403 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1404 #ifdef CTL_TIME_IO 1405 getbintime(&io->io_hdr.dma_start_bt); 1406 #endif 1407 ctl_datamove(io); 1408 } 1409 } 1410 1411 static void 1412 ctl_be_block_worker(void *context, int pending) 1413 { 1414 struct ctl_be_block_lun *be_lun; 1415 struct ctl_be_block_softc *softc; 1416 union ctl_io *io; 1417 1418 be_lun = (struct ctl_be_block_lun *)context; 1419 softc = be_lun->softc; 1420 1421 DPRINTF("entered\n"); 1422 1423 mtx_lock(&be_lun->queue_lock); 1424 for (;;) { 1425 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1426 if (io != NULL) { 1427 struct ctl_be_block_io *beio; 1428 1429 DPRINTF("datamove queue\n"); 1430 1431 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1432 ctl_io_hdr, links); 1433 1434 mtx_unlock(&be_lun->queue_lock); 1435 1436 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1437 1438 be_lun->dispatch(be_lun, beio); 1439 1440 mtx_lock(&be_lun->queue_lock); 1441 continue; 1442 } 1443 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1444 if (io != NULL) { 1445 1446 DPRINTF("config write queue\n"); 1447 1448 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1449 ctl_io_hdr, links); 1450 1451 mtx_unlock(&be_lun->queue_lock); 1452 1453 ctl_be_block_cw_dispatch(be_lun, io); 1454 1455 mtx_lock(&be_lun->queue_lock); 1456 continue; 1457 } 1458 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1459 if (io != NULL) { 1460 DPRINTF("input queue\n"); 1461 1462 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1463 ctl_io_hdr, links); 1464 mtx_unlock(&be_lun->queue_lock); 1465 1466 /* 1467 * We must drop the lock, since this routine and 1468 * its children may sleep. 1469 */ 1470 ctl_be_block_dispatch(be_lun, io); 1471 1472 mtx_lock(&be_lun->queue_lock); 1473 continue; 1474 } 1475 1476 /* 1477 * If we get here, there is no work left in the queues, so 1478 * just break out and let the task queue go to sleep. 1479 */ 1480 break; 1481 } 1482 mtx_unlock(&be_lun->queue_lock); 1483 } 1484 1485 /* 1486 * Entry point from CTL to the backend for I/O. We queue everything to a 1487 * work thread, so this just puts the I/O on a queue and wakes up the 1488 * thread. 1489 */ 1490 static int 1491 ctl_be_block_submit(union ctl_io *io) 1492 { 1493 struct ctl_be_block_lun *be_lun; 1494 struct ctl_be_lun *ctl_be_lun; 1495 1496 DPRINTF("entered\n"); 1497 1498 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1499 CTL_PRIV_BACKEND_LUN].ptr; 1500 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1501 1502 /* 1503 * Make sure we only get SCSI I/O. 1504 */ 1505 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1506 "%#x) encountered", io->io_hdr.io_type)); 1507 1508 PRIV(io)->len = 0; 1509 1510 mtx_lock(&be_lun->queue_lock); 1511 /* 1512 * XXX KDM make sure that links is okay to use at this point. 1513 * Otherwise, we either need to add another field to ctl_io_hdr, 1514 * or deal with resource allocation here. 1515 */ 1516 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1517 mtx_unlock(&be_lun->queue_lock); 1518 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1519 1520 return (CTL_RETVAL_COMPLETE); 1521 } 1522 1523 static int 1524 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1525 int flag, struct thread *td) 1526 { 1527 struct ctl_be_block_softc *softc; 1528 int error; 1529 1530 softc = &backend_block_softc; 1531 1532 error = 0; 1533 1534 switch (cmd) { 1535 case CTL_LUN_REQ: { 1536 struct ctl_lun_req *lun_req; 1537 1538 lun_req = (struct ctl_lun_req *)addr; 1539 1540 switch (lun_req->reqtype) { 1541 case CTL_LUNREQ_CREATE: 1542 error = ctl_be_block_create(softc, lun_req); 1543 break; 1544 case CTL_LUNREQ_RM: 1545 error = ctl_be_block_rm(softc, lun_req); 1546 break; 1547 case CTL_LUNREQ_MODIFY: 1548 error = ctl_be_block_modify(softc, lun_req); 1549 break; 1550 default: 1551 lun_req->status = CTL_LUN_ERROR; 1552 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1553 "%s: invalid LUN request type %d", __func__, 1554 lun_req->reqtype); 1555 break; 1556 } 1557 break; 1558 } 1559 default: 1560 error = ENOTTY; 1561 break; 1562 } 1563 1564 return (error); 1565 } 1566 1567 static int 1568 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1569 { 1570 struct ctl_be_block_filedata *file_data; 1571 struct ctl_lun_create_params *params; 1572 struct vattr vattr; 1573 int error; 1574 1575 error = 0; 1576 file_data = &be_lun->backend.file; 1577 params = &req->reqdata.create; 1578 1579 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1580 be_lun->dispatch = ctl_be_block_dispatch_file; 1581 be_lun->lun_flush = ctl_be_block_flush_file; 1582 1583 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1584 if (error != 0) { 1585 snprintf(req->error_str, sizeof(req->error_str), 1586 "error calling VOP_GETATTR() for file %s", 1587 be_lun->dev_path); 1588 return (error); 1589 } 1590 1591 /* 1592 * Verify that we have the ability to upgrade to exclusive 1593 * access on this file so we can trap errors at open instead 1594 * of reporting them during first access. 1595 */ 1596 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1597 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1598 if (be_lun->vn->v_iflag & VI_DOOMED) { 1599 error = EBADF; 1600 snprintf(req->error_str, sizeof(req->error_str), 1601 "error locking file %s", be_lun->dev_path); 1602 return (error); 1603 } 1604 } 1605 1606 1607 file_data->cred = crhold(curthread->td_ucred); 1608 if (params->lun_size_bytes != 0) 1609 be_lun->size_bytes = params->lun_size_bytes; 1610 else 1611 be_lun->size_bytes = vattr.va_size; 1612 /* 1613 * We set the multi thread flag for file operations because all 1614 * filesystems (in theory) are capable of allowing multiple readers 1615 * of a file at once. So we want to get the maximum possible 1616 * concurrency. 1617 */ 1618 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1619 1620 /* 1621 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1622 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1623 * with disklabel and UFS on FreeBSD at least. Large block sizes 1624 * may not work with other OSes as well. So just export a sector 1625 * size of 512 bytes, which should work with any OS or 1626 * application. Since our backing is a file, any block size will 1627 * work fine for the backing store. 1628 */ 1629 #if 0 1630 be_lun->blocksize= vattr.va_blocksize; 1631 #endif 1632 if (params->blocksize_bytes != 0) 1633 be_lun->blocksize = params->blocksize_bytes; 1634 else 1635 be_lun->blocksize = 512; 1636 1637 /* 1638 * Sanity check. The media size has to be at least one 1639 * sector long. 1640 */ 1641 if (be_lun->size_bytes < be_lun->blocksize) { 1642 error = EINVAL; 1643 snprintf(req->error_str, sizeof(req->error_str), 1644 "file %s size %ju < block size %u", be_lun->dev_path, 1645 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1646 } 1647 return (error); 1648 } 1649 1650 static int 1651 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1652 { 1653 struct ctl_lun_create_params *params; 1654 struct vattr vattr; 1655 struct cdev *dev; 1656 struct cdevsw *devsw; 1657 int error; 1658 off_t ps, pss, po, pos; 1659 1660 params = &req->reqdata.create; 1661 1662 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1663 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1664 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1665 &be_lun->backend.dev.dev_ref); 1666 if (be_lun->backend.dev.csw == NULL) 1667 panic("Unable to retrieve device switch"); 1668 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) 1669 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1670 else 1671 be_lun->dispatch = ctl_be_block_dispatch_dev; 1672 be_lun->lun_flush = ctl_be_block_flush_dev; 1673 be_lun->unmap = ctl_be_block_unmap_dev; 1674 1675 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1676 if (error) { 1677 snprintf(req->error_str, sizeof(req->error_str), 1678 "%s: error getting vnode attributes for device %s", 1679 __func__, be_lun->dev_path); 1680 return (error); 1681 } 1682 1683 dev = be_lun->vn->v_rdev; 1684 devsw = dev->si_devsw; 1685 if (!devsw->d_ioctl) { 1686 snprintf(req->error_str, sizeof(req->error_str), 1687 "%s: no d_ioctl for device %s!", __func__, 1688 be_lun->dev_path); 1689 return (ENODEV); 1690 } 1691 1692 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1693 (caddr_t)&be_lun->blocksize, FREAD, 1694 curthread); 1695 if (error) { 1696 snprintf(req->error_str, sizeof(req->error_str), 1697 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1698 "on %s!", __func__, error, be_lun->dev_path); 1699 return (error); 1700 } 1701 1702 /* 1703 * If the user has asked for a blocksize that is greater than the 1704 * backing device's blocksize, we can do it only if the blocksize 1705 * the user is asking for is an even multiple of the underlying 1706 * device's blocksize. 1707 */ 1708 if ((params->blocksize_bytes != 0) 1709 && (params->blocksize_bytes > be_lun->blocksize)) { 1710 uint32_t bs_multiple, tmp_blocksize; 1711 1712 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1713 1714 tmp_blocksize = bs_multiple * be_lun->blocksize; 1715 1716 if (tmp_blocksize == params->blocksize_bytes) { 1717 be_lun->blocksize = params->blocksize_bytes; 1718 } else { 1719 snprintf(req->error_str, sizeof(req->error_str), 1720 "%s: requested blocksize %u is not an even " 1721 "multiple of backing device blocksize %u", 1722 __func__, params->blocksize_bytes, 1723 be_lun->blocksize); 1724 return (EINVAL); 1725 1726 } 1727 } else if ((params->blocksize_bytes != 0) 1728 && (params->blocksize_bytes != be_lun->blocksize)) { 1729 snprintf(req->error_str, sizeof(req->error_str), 1730 "%s: requested blocksize %u < backing device " 1731 "blocksize %u", __func__, params->blocksize_bytes, 1732 be_lun->blocksize); 1733 return (EINVAL); 1734 } 1735 1736 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1737 (caddr_t)&be_lun->size_bytes, FREAD, 1738 curthread); 1739 if (error) { 1740 snprintf(req->error_str, sizeof(req->error_str), 1741 "%s: error %d returned for DIOCGMEDIASIZE " 1742 " ioctl on %s!", __func__, error, 1743 be_lun->dev_path); 1744 return (error); 1745 } 1746 1747 if (params->lun_size_bytes != 0) { 1748 if (params->lun_size_bytes > be_lun->size_bytes) { 1749 snprintf(req->error_str, sizeof(req->error_str), 1750 "%s: requested LUN size %ju > backing device " 1751 "size %ju", __func__, 1752 (uintmax_t)params->lun_size_bytes, 1753 (uintmax_t)be_lun->size_bytes); 1754 return (EINVAL); 1755 } 1756 1757 be_lun->size_bytes = params->lun_size_bytes; 1758 } 1759 1760 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1761 (caddr_t)&ps, FREAD, curthread); 1762 if (error) 1763 ps = po = 0; 1764 else { 1765 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1766 (caddr_t)&po, FREAD, curthread); 1767 if (error) 1768 po = 0; 1769 } 1770 pss = ps / be_lun->blocksize; 1771 pos = po / be_lun->blocksize; 1772 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1773 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1774 be_lun->pblockexp = fls(pss) - 1; 1775 be_lun->pblockoff = (pss - pos) % pss; 1776 } 1777 1778 return (0); 1779 } 1780 1781 static int 1782 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1783 { 1784 DROP_GIANT(); 1785 if (be_lun->vn) { 1786 int flags = FREAD | FWRITE; 1787 1788 switch (be_lun->dev_type) { 1789 case CTL_BE_BLOCK_DEV: 1790 if (be_lun->backend.dev.csw) { 1791 dev_relthread(be_lun->backend.dev.cdev, 1792 be_lun->backend.dev.dev_ref); 1793 be_lun->backend.dev.csw = NULL; 1794 be_lun->backend.dev.cdev = NULL; 1795 } 1796 break; 1797 case CTL_BE_BLOCK_FILE: 1798 break; 1799 case CTL_BE_BLOCK_NONE: 1800 break; 1801 default: 1802 panic("Unexpected backend type."); 1803 break; 1804 } 1805 1806 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1807 be_lun->vn = NULL; 1808 1809 switch (be_lun->dev_type) { 1810 case CTL_BE_BLOCK_DEV: 1811 break; 1812 case CTL_BE_BLOCK_FILE: 1813 if (be_lun->backend.file.cred != NULL) { 1814 crfree(be_lun->backend.file.cred); 1815 be_lun->backend.file.cred = NULL; 1816 } 1817 break; 1818 case CTL_BE_BLOCK_NONE: 1819 break; 1820 default: 1821 panic("Unexpected backend type."); 1822 break; 1823 } 1824 } 1825 PICKUP_GIANT(); 1826 1827 return (0); 1828 } 1829 1830 static int 1831 ctl_be_block_open(struct ctl_be_block_softc *softc, 1832 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1833 { 1834 struct nameidata nd; 1835 int flags; 1836 int error; 1837 1838 /* 1839 * XXX KDM allow a read-only option? 1840 */ 1841 flags = FREAD | FWRITE; 1842 error = 0; 1843 1844 if (rootvnode == NULL) { 1845 snprintf(req->error_str, sizeof(req->error_str), 1846 "%s: Root filesystem is not mounted", __func__); 1847 return (1); 1848 } 1849 1850 if (!curthread->td_proc->p_fd->fd_cdir) { 1851 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1852 VREF(rootvnode); 1853 } 1854 if (!curthread->td_proc->p_fd->fd_rdir) { 1855 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1856 VREF(rootvnode); 1857 } 1858 if (!curthread->td_proc->p_fd->fd_jdir) { 1859 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1860 VREF(rootvnode); 1861 } 1862 1863 again: 1864 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1865 error = vn_open(&nd, &flags, 0, NULL); 1866 if (error) { 1867 /* 1868 * This is the only reasonable guess we can make as far as 1869 * path if the user doesn't give us a fully qualified path. 1870 * If they want to specify a file, they need to specify the 1871 * full path. 1872 */ 1873 if (be_lun->dev_path[0] != '/') { 1874 char *dev_path = "/dev/"; 1875 char *dev_name; 1876 1877 /* Try adding device path at beginning of name */ 1878 dev_name = malloc(strlen(be_lun->dev_path) 1879 + strlen(dev_path) + 1, 1880 M_CTLBLK, M_WAITOK); 1881 if (dev_name) { 1882 sprintf(dev_name, "%s%s", dev_path, 1883 be_lun->dev_path); 1884 free(be_lun->dev_path, M_CTLBLK); 1885 be_lun->dev_path = dev_name; 1886 goto again; 1887 } 1888 } 1889 snprintf(req->error_str, sizeof(req->error_str), 1890 "%s: error opening %s", __func__, be_lun->dev_path); 1891 return (error); 1892 } 1893 1894 NDFREE(&nd, NDF_ONLY_PNBUF); 1895 1896 be_lun->vn = nd.ni_vp; 1897 1898 /* We only support disks and files. */ 1899 if (vn_isdisk(be_lun->vn, &error)) { 1900 error = ctl_be_block_open_dev(be_lun, req); 1901 } else if (be_lun->vn->v_type == VREG) { 1902 error = ctl_be_block_open_file(be_lun, req); 1903 } else { 1904 error = EINVAL; 1905 snprintf(req->error_str, sizeof(req->error_str), 1906 "%s is not a disk or plain file", be_lun->dev_path); 1907 } 1908 VOP_UNLOCK(be_lun->vn, 0); 1909 1910 if (error != 0) { 1911 ctl_be_block_close(be_lun); 1912 return (error); 1913 } 1914 1915 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1916 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1917 1918 return (0); 1919 } 1920 1921 static int 1922 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1923 { 1924 struct ctl_be_block_lun *be_lun; 1925 struct ctl_lun_create_params *params; 1926 char num_thread_str[16]; 1927 char tmpstr[32]; 1928 char *value; 1929 int retval, num_threads, unmap; 1930 int tmp_num_threads; 1931 1932 params = &req->reqdata.create; 1933 retval = 0; 1934 1935 num_threads = cbb_num_threads; 1936 1937 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1938 1939 be_lun->softc = softc; 1940 STAILQ_INIT(&be_lun->input_queue); 1941 STAILQ_INIT(&be_lun->config_write_queue); 1942 STAILQ_INIT(&be_lun->datamove_queue); 1943 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1944 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1945 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 1946 ctl_init_opts(&be_lun->ctl_be_lun.options, 1947 req->num_be_args, req->kern_be_args); 1948 1949 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1950 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1951 1952 if (be_lun->lun_zone == NULL) { 1953 snprintf(req->error_str, sizeof(req->error_str), 1954 "%s: error allocating UMA zone", __func__); 1955 goto bailout_error; 1956 } 1957 1958 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1959 be_lun->ctl_be_lun.lun_type = params->device_type; 1960 else 1961 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1962 1963 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1964 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file"); 1965 if (value == NULL) { 1966 snprintf(req->error_str, sizeof(req->error_str), 1967 "%s: no file argument specified", __func__); 1968 goto bailout_error; 1969 } 1970 be_lun->dev_path = strdup(value, M_CTLBLK); 1971 1972 retval = ctl_be_block_open(softc, be_lun, req); 1973 if (retval != 0) { 1974 retval = 0; 1975 goto bailout_error; 1976 } 1977 1978 /* 1979 * Tell the user the size of the file/device. 1980 */ 1981 params->lun_size_bytes = be_lun->size_bytes; 1982 1983 /* 1984 * The maximum LBA is the size - 1. 1985 */ 1986 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1987 } else { 1988 /* 1989 * For processor devices, we don't have any size. 1990 */ 1991 be_lun->blocksize = 0; 1992 be_lun->pblockexp = 0; 1993 be_lun->pblockoff = 0; 1994 be_lun->size_blocks = 0; 1995 be_lun->size_bytes = 0; 1996 be_lun->ctl_be_lun.maxlba = 0; 1997 params->lun_size_bytes = 0; 1998 1999 /* 2000 * Default to just 1 thread for processor devices. 2001 */ 2002 num_threads = 1; 2003 } 2004 2005 /* 2006 * XXX This searching loop might be refactored to be combined with 2007 * the loop above, 2008 */ 2009 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads"); 2010 if (value != NULL) { 2011 tmp_num_threads = strtol(value, NULL, 0); 2012 2013 /* 2014 * We don't let the user specify less than one 2015 * thread, but hope he's clueful enough not to 2016 * specify 1000 threads. 2017 */ 2018 if (tmp_num_threads < 1) { 2019 snprintf(req->error_str, sizeof(req->error_str), 2020 "%s: invalid number of threads %s", 2021 __func__, num_thread_str); 2022 goto bailout_error; 2023 } 2024 num_threads = tmp_num_threads; 2025 } 2026 unmap = 0; 2027 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap"); 2028 if (value != NULL && strcmp(value, "on") == 0) 2029 unmap = 1; 2030 2031 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 2032 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 2033 if (unmap) 2034 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 2035 be_lun->ctl_be_lun.be_lun = be_lun; 2036 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 2037 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 2038 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 2039 /* Tell the user the blocksize we ended up using */ 2040 params->blocksize_bytes = be_lun->blocksize; 2041 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2042 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 2043 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 2044 } else 2045 be_lun->ctl_be_lun.req_lun_id = 0; 2046 2047 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 2048 be_lun->ctl_be_lun.lun_config_status = 2049 ctl_be_block_lun_config_status; 2050 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 2051 2052 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2053 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 2054 softc->num_luns); 2055 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 2056 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2057 sizeof(tmpstr))); 2058 2059 /* Tell the user what we used for a serial number */ 2060 strncpy((char *)params->serial_num, tmpstr, 2061 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 2062 } else { 2063 strncpy((char *)be_lun->ctl_be_lun.serial_num, 2064 params->serial_num, 2065 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2066 sizeof(params->serial_num))); 2067 } 2068 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2069 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2070 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 2071 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2072 sizeof(tmpstr))); 2073 2074 /* Tell the user what we used for a device ID */ 2075 strncpy((char *)params->device_id, tmpstr, 2076 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2077 } else { 2078 strncpy((char *)be_lun->ctl_be_lun.device_id, 2079 params->device_id, 2080 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2081 sizeof(params->device_id))); 2082 } 2083 2084 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2085 2086 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2087 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2088 2089 if (be_lun->io_taskqueue == NULL) { 2090 snprintf(req->error_str, sizeof(req->error_str), 2091 "%s: Unable to create taskqueue", __func__); 2092 goto bailout_error; 2093 } 2094 2095 /* 2096 * Note that we start the same number of threads by default for 2097 * both the file case and the block device case. For the file 2098 * case, we need multiple threads to allow concurrency, because the 2099 * vnode interface is designed to be a blocking interface. For the 2100 * block device case, ZFS zvols at least will block the caller's 2101 * context in many instances, and so we need multiple threads to 2102 * overcome that problem. Other block devices don't need as many 2103 * threads, but they shouldn't cause too many problems. 2104 * 2105 * If the user wants to just have a single thread for a block 2106 * device, he can specify that when the LUN is created, or change 2107 * the tunable/sysctl to alter the default number of threads. 2108 */ 2109 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2110 /*num threads*/num_threads, 2111 /*priority*/PWAIT, 2112 /*thread name*/ 2113 "%s taskq", be_lun->lunname); 2114 2115 if (retval != 0) 2116 goto bailout_error; 2117 2118 be_lun->num_threads = num_threads; 2119 2120 mtx_lock(&softc->lock); 2121 softc->num_luns++; 2122 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2123 2124 mtx_unlock(&softc->lock); 2125 2126 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2127 if (retval != 0) { 2128 mtx_lock(&softc->lock); 2129 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2130 links); 2131 softc->num_luns--; 2132 mtx_unlock(&softc->lock); 2133 snprintf(req->error_str, sizeof(req->error_str), 2134 "%s: ctl_add_lun() returned error %d, see dmesg for " 2135 "details", __func__, retval); 2136 retval = 0; 2137 goto bailout_error; 2138 } 2139 2140 mtx_lock(&softc->lock); 2141 2142 /* 2143 * Tell the config_status routine that we're waiting so it won't 2144 * clean up the LUN in the event of an error. 2145 */ 2146 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2147 2148 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2149 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2150 if (retval == EINTR) 2151 break; 2152 } 2153 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2154 2155 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2156 snprintf(req->error_str, sizeof(req->error_str), 2157 "%s: LUN configuration error, see dmesg for details", 2158 __func__); 2159 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2160 links); 2161 softc->num_luns--; 2162 mtx_unlock(&softc->lock); 2163 goto bailout_error; 2164 } else { 2165 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2166 } 2167 2168 mtx_unlock(&softc->lock); 2169 2170 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2171 be_lun->blocksize, 2172 DEVSTAT_ALL_SUPPORTED, 2173 be_lun->ctl_be_lun.lun_type 2174 | DEVSTAT_TYPE_IF_OTHER, 2175 DEVSTAT_PRIORITY_OTHER); 2176 2177 2178 req->status = CTL_LUN_OK; 2179 2180 return (retval); 2181 2182 bailout_error: 2183 req->status = CTL_LUN_ERROR; 2184 2185 if (be_lun->io_taskqueue != NULL) 2186 taskqueue_free(be_lun->io_taskqueue); 2187 ctl_be_block_close(be_lun); 2188 if (be_lun->dev_path != NULL) 2189 free(be_lun->dev_path, M_CTLBLK); 2190 if (be_lun->lun_zone != NULL) 2191 uma_zdestroy(be_lun->lun_zone); 2192 ctl_free_opts(&be_lun->ctl_be_lun.options); 2193 mtx_destroy(&be_lun->queue_lock); 2194 mtx_destroy(&be_lun->io_lock); 2195 free(be_lun, M_CTLBLK); 2196 2197 return (retval); 2198 } 2199 2200 static int 2201 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2202 { 2203 struct ctl_lun_rm_params *params; 2204 struct ctl_be_block_lun *be_lun; 2205 int retval; 2206 2207 params = &req->reqdata.rm; 2208 2209 mtx_lock(&softc->lock); 2210 2211 be_lun = NULL; 2212 2213 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2214 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2215 break; 2216 } 2217 mtx_unlock(&softc->lock); 2218 2219 if (be_lun == NULL) { 2220 snprintf(req->error_str, sizeof(req->error_str), 2221 "%s: LUN %u is not managed by the block backend", 2222 __func__, params->lun_id); 2223 goto bailout_error; 2224 } 2225 2226 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2227 2228 if (retval != 0) { 2229 snprintf(req->error_str, sizeof(req->error_str), 2230 "%s: error %d returned from ctl_disable_lun() for " 2231 "LUN %d", __func__, retval, params->lun_id); 2232 goto bailout_error; 2233 2234 } 2235 2236 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2237 if (retval != 0) { 2238 snprintf(req->error_str, sizeof(req->error_str), 2239 "%s: error %d returned from ctl_invalidate_lun() for " 2240 "LUN %d", __func__, retval, params->lun_id); 2241 goto bailout_error; 2242 } 2243 2244 mtx_lock(&softc->lock); 2245 2246 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2247 2248 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2249 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2250 if (retval == EINTR) 2251 break; 2252 } 2253 2254 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2255 2256 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2257 snprintf(req->error_str, sizeof(req->error_str), 2258 "%s: interrupted waiting for LUN to be freed", 2259 __func__); 2260 mtx_unlock(&softc->lock); 2261 goto bailout_error; 2262 } 2263 2264 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2265 2266 softc->num_luns--; 2267 mtx_unlock(&softc->lock); 2268 2269 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2270 2271 taskqueue_free(be_lun->io_taskqueue); 2272 2273 ctl_be_block_close(be_lun); 2274 2275 if (be_lun->disk_stats != NULL) 2276 devstat_remove_entry(be_lun->disk_stats); 2277 2278 uma_zdestroy(be_lun->lun_zone); 2279 2280 ctl_free_opts(&be_lun->ctl_be_lun.options); 2281 free(be_lun->dev_path, M_CTLBLK); 2282 mtx_destroy(&be_lun->queue_lock); 2283 mtx_destroy(&be_lun->io_lock); 2284 free(be_lun, M_CTLBLK); 2285 2286 req->status = CTL_LUN_OK; 2287 2288 return (0); 2289 2290 bailout_error: 2291 2292 req->status = CTL_LUN_ERROR; 2293 2294 return (0); 2295 } 2296 2297 static int 2298 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2299 struct ctl_lun_req *req) 2300 { 2301 struct vattr vattr; 2302 int error; 2303 struct ctl_lun_modify_params *params; 2304 2305 params = &req->reqdata.modify; 2306 2307 if (params->lun_size_bytes != 0) { 2308 be_lun->size_bytes = params->lun_size_bytes; 2309 } else { 2310 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2311 if (error != 0) { 2312 snprintf(req->error_str, sizeof(req->error_str), 2313 "error calling VOP_GETATTR() for file %s", 2314 be_lun->dev_path); 2315 return (error); 2316 } 2317 2318 be_lun->size_bytes = vattr.va_size; 2319 } 2320 2321 return (0); 2322 } 2323 2324 static int 2325 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2326 struct ctl_lun_req *req) 2327 { 2328 struct cdev *dev; 2329 struct cdevsw *devsw; 2330 int error; 2331 struct ctl_lun_modify_params *params; 2332 uint64_t size_bytes; 2333 2334 params = &req->reqdata.modify; 2335 2336 dev = be_lun->vn->v_rdev; 2337 devsw = dev->si_devsw; 2338 if (!devsw->d_ioctl) { 2339 snprintf(req->error_str, sizeof(req->error_str), 2340 "%s: no d_ioctl for device %s!", __func__, 2341 be_lun->dev_path); 2342 return (ENODEV); 2343 } 2344 2345 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2346 (caddr_t)&size_bytes, FREAD, 2347 curthread); 2348 if (error) { 2349 snprintf(req->error_str, sizeof(req->error_str), 2350 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2351 "on %s!", __func__, error, be_lun->dev_path); 2352 return (error); 2353 } 2354 2355 if (params->lun_size_bytes != 0) { 2356 if (params->lun_size_bytes > size_bytes) { 2357 snprintf(req->error_str, sizeof(req->error_str), 2358 "%s: requested LUN size %ju > backing device " 2359 "size %ju", __func__, 2360 (uintmax_t)params->lun_size_bytes, 2361 (uintmax_t)size_bytes); 2362 return (EINVAL); 2363 } 2364 2365 be_lun->size_bytes = params->lun_size_bytes; 2366 } else { 2367 be_lun->size_bytes = size_bytes; 2368 } 2369 2370 return (0); 2371 } 2372 2373 static int 2374 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2375 { 2376 struct ctl_lun_modify_params *params; 2377 struct ctl_be_block_lun *be_lun; 2378 int error; 2379 2380 params = &req->reqdata.modify; 2381 2382 mtx_lock(&softc->lock); 2383 2384 be_lun = NULL; 2385 2386 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2387 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2388 break; 2389 } 2390 mtx_unlock(&softc->lock); 2391 2392 if (be_lun == NULL) { 2393 snprintf(req->error_str, sizeof(req->error_str), 2394 "%s: LUN %u is not managed by the block backend", 2395 __func__, params->lun_id); 2396 goto bailout_error; 2397 } 2398 2399 if (params->lun_size_bytes != 0) { 2400 if (params->lun_size_bytes < be_lun->blocksize) { 2401 snprintf(req->error_str, sizeof(req->error_str), 2402 "%s: LUN size %ju < blocksize %u", __func__, 2403 params->lun_size_bytes, be_lun->blocksize); 2404 goto bailout_error; 2405 } 2406 } 2407 2408 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2409 2410 if (be_lun->vn->v_type == VREG) 2411 error = ctl_be_block_modify_file(be_lun, req); 2412 else 2413 error = ctl_be_block_modify_dev(be_lun, req); 2414 2415 VOP_UNLOCK(be_lun->vn, 0); 2416 2417 if (error != 0) 2418 goto bailout_error; 2419 2420 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 2421 2422 /* 2423 * The maximum LBA is the size - 1. 2424 * 2425 * XXX: Note that this field is being updated without locking, 2426 * which might cause problems on 32-bit architectures. 2427 */ 2428 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2429 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2430 2431 /* Tell the user the exact size we ended up using */ 2432 params->lun_size_bytes = be_lun->size_bytes; 2433 2434 req->status = CTL_LUN_OK; 2435 2436 return (0); 2437 2438 bailout_error: 2439 req->status = CTL_LUN_ERROR; 2440 2441 return (0); 2442 } 2443 2444 static void 2445 ctl_be_block_lun_shutdown(void *be_lun) 2446 { 2447 struct ctl_be_block_lun *lun; 2448 struct ctl_be_block_softc *softc; 2449 2450 lun = (struct ctl_be_block_lun *)be_lun; 2451 2452 softc = lun->softc; 2453 2454 mtx_lock(&softc->lock); 2455 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2456 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2457 wakeup(lun); 2458 mtx_unlock(&softc->lock); 2459 2460 } 2461 2462 static void 2463 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2464 { 2465 struct ctl_be_block_lun *lun; 2466 struct ctl_be_block_softc *softc; 2467 2468 lun = (struct ctl_be_block_lun *)be_lun; 2469 softc = lun->softc; 2470 2471 if (status == CTL_LUN_CONFIG_OK) { 2472 mtx_lock(&softc->lock); 2473 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2474 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2475 wakeup(lun); 2476 mtx_unlock(&softc->lock); 2477 2478 /* 2479 * We successfully added the LUN, attempt to enable it. 2480 */ 2481 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2482 printf("%s: ctl_enable_lun() failed!\n", __func__); 2483 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2484 printf("%s: ctl_invalidate_lun() failed!\n", 2485 __func__); 2486 } 2487 } 2488 2489 return; 2490 } 2491 2492 2493 mtx_lock(&softc->lock); 2494 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2495 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2496 wakeup(lun); 2497 mtx_unlock(&softc->lock); 2498 } 2499 2500 2501 static int 2502 ctl_be_block_config_write(union ctl_io *io) 2503 { 2504 struct ctl_be_block_lun *be_lun; 2505 struct ctl_be_lun *ctl_be_lun; 2506 int retval; 2507 2508 retval = 0; 2509 2510 DPRINTF("entered\n"); 2511 2512 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2513 CTL_PRIV_BACKEND_LUN].ptr; 2514 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2515 2516 switch (io->scsiio.cdb[0]) { 2517 case SYNCHRONIZE_CACHE: 2518 case SYNCHRONIZE_CACHE_16: 2519 case WRITE_SAME_10: 2520 case WRITE_SAME_16: 2521 case UNMAP: 2522 /* 2523 * The upper level CTL code will filter out any CDBs with 2524 * the immediate bit set and return the proper error. 2525 * 2526 * We don't really need to worry about what LBA range the 2527 * user asked to be synced out. When they issue a sync 2528 * cache command, we'll sync out the whole thing. 2529 */ 2530 mtx_lock(&be_lun->queue_lock); 2531 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2532 links); 2533 mtx_unlock(&be_lun->queue_lock); 2534 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2535 break; 2536 case START_STOP_UNIT: { 2537 struct scsi_start_stop_unit *cdb; 2538 2539 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2540 2541 if (cdb->how & SSS_START) 2542 retval = ctl_start_lun(ctl_be_lun); 2543 else { 2544 retval = ctl_stop_lun(ctl_be_lun); 2545 /* 2546 * XXX KDM Copan-specific offline behavior. 2547 * Figure out a reasonable way to port this? 2548 */ 2549 #ifdef NEEDTOPORT 2550 if ((retval == 0) 2551 && (cdb->byte2 & SSS_ONOFFLINE)) 2552 retval = ctl_lun_offline(ctl_be_lun); 2553 #endif 2554 } 2555 2556 /* 2557 * In general, the above routines should not fail. They 2558 * just set state for the LUN. So we've got something 2559 * pretty wrong here if we can't start or stop the LUN. 2560 */ 2561 if (retval != 0) { 2562 ctl_set_internal_failure(&io->scsiio, 2563 /*sks_valid*/ 1, 2564 /*retry_count*/ 0xf051); 2565 retval = CTL_RETVAL_COMPLETE; 2566 } else { 2567 ctl_set_success(&io->scsiio); 2568 } 2569 ctl_config_write_done(io); 2570 break; 2571 } 2572 default: 2573 ctl_set_invalid_opcode(&io->scsiio); 2574 ctl_config_write_done(io); 2575 retval = CTL_RETVAL_COMPLETE; 2576 break; 2577 } 2578 2579 return (retval); 2580 2581 } 2582 2583 static int 2584 ctl_be_block_config_read(union ctl_io *io) 2585 { 2586 return (0); 2587 } 2588 2589 static int 2590 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2591 { 2592 struct ctl_be_block_lun *lun; 2593 int retval; 2594 2595 lun = (struct ctl_be_block_lun *)be_lun; 2596 retval = 0; 2597 2598 retval = sbuf_printf(sb, "\t<num_threads>"); 2599 2600 if (retval != 0) 2601 goto bailout; 2602 2603 retval = sbuf_printf(sb, "%d", lun->num_threads); 2604 2605 if (retval != 0) 2606 goto bailout; 2607 2608 retval = sbuf_printf(sb, "</num_threads>\n"); 2609 2610 bailout: 2611 2612 return (retval); 2613 } 2614 2615 int 2616 ctl_be_block_init(void) 2617 { 2618 struct ctl_be_block_softc *softc; 2619 int retval; 2620 2621 softc = &backend_block_softc; 2622 retval = 0; 2623 2624 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2625 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2626 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2627 STAILQ_INIT(&softc->disk_list); 2628 STAILQ_INIT(&softc->lun_list); 2629 2630 return (retval); 2631 } 2632