1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37 /* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/types.h> 49 #include <sys/kthread.h> 50 #include <sys/bio.h> 51 #include <sys/fcntl.h> 52 #include <sys/limits.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/condvar.h> 56 #include <sys/malloc.h> 57 #include <sys/conf.h> 58 #include <sys/ioccom.h> 59 #include <sys/queue.h> 60 #include <sys/sbuf.h> 61 #include <sys/endian.h> 62 #include <sys/uio.h> 63 #include <sys/buf.h> 64 #include <sys/taskqueue.h> 65 #include <sys/vnode.h> 66 #include <sys/namei.h> 67 #include <sys/mount.h> 68 #include <sys/disk.h> 69 #include <sys/fcntl.h> 70 #include <sys/filedesc.h> 71 #include <sys/proc.h> 72 #include <sys/pcpu.h> 73 #include <sys/module.h> 74 #include <sys/sdt.h> 75 #include <sys/devicestat.h> 76 #include <sys/sysctl.h> 77 78 #include <geom/geom.h> 79 80 #include <cam/cam.h> 81 #include <cam/scsi/scsi_all.h> 82 #include <cam/scsi/scsi_da.h> 83 #include <cam/ctl/ctl_io.h> 84 #include <cam/ctl/ctl.h> 85 #include <cam/ctl/ctl_backend.h> 86 #include <cam/ctl/ctl_frontend_internal.h> 87 #include <cam/ctl/ctl_ioctl.h> 88 #include <cam/ctl/ctl_scsi_all.h> 89 #include <cam/ctl/ctl_error.h> 90 91 /* 92 * The idea here is that we'll allocate enough S/G space to hold a 1MB 93 * I/O. If we get an I/O larger than that, we'll split it. 94 */ 95 #define CTLBLK_HALF_IO_SIZE (512 * 1024) 96 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 97 #define CTLBLK_MAX_SEG MAXPHYS 98 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 99 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 100 101 #ifdef CTLBLK_DEBUG 102 #define DPRINTF(fmt, args...) \ 103 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 104 #else 105 #define DPRINTF(fmt, args...) do {} while(0) 106 #endif 107 108 #define PRIV(io) \ 109 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 110 #define ARGS(io) \ 111 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 112 113 SDT_PROVIDER_DEFINE(cbb); 114 115 typedef enum { 116 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 117 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 118 CTL_BE_BLOCK_LUN_WAITING = 0x04, 119 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 120 } ctl_be_block_lun_flags; 121 122 typedef enum { 123 CTL_BE_BLOCK_NONE, 124 CTL_BE_BLOCK_DEV, 125 CTL_BE_BLOCK_FILE 126 } ctl_be_block_type; 127 128 struct ctl_be_block_devdata { 129 struct cdev *cdev; 130 struct cdevsw *csw; 131 int dev_ref; 132 }; 133 134 struct ctl_be_block_filedata { 135 struct ucred *cred; 136 }; 137 138 union ctl_be_block_bedata { 139 struct ctl_be_block_devdata dev; 140 struct ctl_be_block_filedata file; 141 }; 142 143 struct ctl_be_block_io; 144 struct ctl_be_block_lun; 145 146 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 147 struct ctl_be_block_io *beio); 148 149 /* 150 * Backend LUN structure. There is a 1:1 mapping between a block device 151 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 152 */ 153 struct ctl_be_block_lun { 154 struct ctl_block_disk *disk; 155 char lunname[32]; 156 char *dev_path; 157 ctl_be_block_type dev_type; 158 struct vnode *vn; 159 union ctl_be_block_bedata backend; 160 cbb_dispatch_t dispatch; 161 cbb_dispatch_t lun_flush; 162 cbb_dispatch_t unmap; 163 uma_zone_t lun_zone; 164 uint64_t size_blocks; 165 uint64_t size_bytes; 166 uint32_t blocksize; 167 int blocksize_shift; 168 uint16_t pblockexp; 169 uint16_t pblockoff; 170 struct ctl_be_block_softc *softc; 171 struct devstat *disk_stats; 172 ctl_be_block_lun_flags flags; 173 STAILQ_ENTRY(ctl_be_block_lun) links; 174 struct ctl_be_lun ctl_be_lun; 175 struct taskqueue *io_taskqueue; 176 struct task io_task; 177 int num_threads; 178 STAILQ_HEAD(, ctl_io_hdr) input_queue; 179 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 180 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 181 struct mtx_padalign io_lock; 182 struct mtx_padalign queue_lock; 183 }; 184 185 /* 186 * Overall softc structure for the block backend module. 187 */ 188 struct ctl_be_block_softc { 189 struct mtx lock; 190 int num_disks; 191 STAILQ_HEAD(, ctl_block_disk) disk_list; 192 int num_luns; 193 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 194 }; 195 196 static struct ctl_be_block_softc backend_block_softc; 197 198 /* 199 * Per-I/O information. 200 */ 201 struct ctl_be_block_io { 202 union ctl_io *io; 203 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 204 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 205 int bio_cmd; 206 int num_segs; 207 int num_bios_sent; 208 int num_bios_done; 209 int send_complete; 210 int num_errors; 211 struct bintime ds_t0; 212 devstat_tag_type ds_tag_type; 213 devstat_trans_flags ds_trans_type; 214 uint64_t io_len; 215 uint64_t io_offset; 216 struct ctl_be_block_softc *softc; 217 struct ctl_be_block_lun *lun; 218 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 219 }; 220 221 static int cbb_num_threads = 14; 222 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 223 "CAM Target Layer Block Backend"); 224 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, 225 &cbb_num_threads, 0, "Number of threads per backing file"); 226 227 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 228 static void ctl_free_beio(struct ctl_be_block_io *beio); 229 static void ctl_complete_beio(struct ctl_be_block_io *beio); 230 static int ctl_be_block_move_done(union ctl_io *io); 231 static void ctl_be_block_biodone(struct bio *bio); 232 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 233 struct ctl_be_block_io *beio); 234 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 235 struct ctl_be_block_io *beio); 236 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 237 struct ctl_be_block_io *beio); 238 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 239 struct ctl_be_block_io *beio); 240 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 241 struct ctl_be_block_io *beio); 242 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 243 union ctl_io *io); 244 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 245 union ctl_io *io); 246 static void ctl_be_block_worker(void *context, int pending); 247 static int ctl_be_block_submit(union ctl_io *io); 248 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 249 int flag, struct thread *td); 250 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 251 struct ctl_lun_req *req); 252 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 253 struct ctl_lun_req *req); 254 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 255 static int ctl_be_block_open(struct ctl_be_block_softc *softc, 256 struct ctl_be_block_lun *be_lun, 257 struct ctl_lun_req *req); 258 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 259 struct ctl_lun_req *req); 260 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 261 struct ctl_lun_req *req); 262 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 263 struct ctl_lun_req *req); 264 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 265 struct ctl_lun_req *req); 266 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 267 struct ctl_lun_req *req); 268 static void ctl_be_block_lun_shutdown(void *be_lun); 269 static void ctl_be_block_lun_config_status(void *be_lun, 270 ctl_lun_config_status status); 271 static int ctl_be_block_config_write(union ctl_io *io); 272 static int ctl_be_block_config_read(union ctl_io *io); 273 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 274 int ctl_be_block_init(void); 275 276 static struct ctl_backend_driver ctl_be_block_driver = 277 { 278 .name = "block", 279 .flags = CTL_BE_FLAG_HAS_CONFIG, 280 .init = ctl_be_block_init, 281 .data_submit = ctl_be_block_submit, 282 .data_move_done = ctl_be_block_move_done, 283 .config_read = ctl_be_block_config_read, 284 .config_write = ctl_be_block_config_write, 285 .ioctl = ctl_be_block_ioctl, 286 .lun_info = ctl_be_block_lun_info 287 }; 288 289 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 290 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 291 292 static uma_zone_t beio_zone; 293 294 static struct ctl_be_block_io * 295 ctl_alloc_beio(struct ctl_be_block_softc *softc) 296 { 297 struct ctl_be_block_io *beio; 298 299 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 300 beio->softc = softc; 301 return (beio); 302 } 303 304 static void 305 ctl_free_beio(struct ctl_be_block_io *beio) 306 { 307 int duplicate_free; 308 int i; 309 310 duplicate_free = 0; 311 312 for (i = 0; i < beio->num_segs; i++) { 313 if (beio->sg_segs[i].addr == NULL) 314 duplicate_free++; 315 316 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 317 beio->sg_segs[i].addr = NULL; 318 319 /* For compare we had two equal S/G lists. */ 320 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 321 uma_zfree(beio->lun->lun_zone, 322 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 323 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 324 } 325 } 326 327 if (duplicate_free > 0) { 328 printf("%s: %d duplicate frees out of %d segments\n", __func__, 329 duplicate_free, beio->num_segs); 330 } 331 332 uma_zfree(beio_zone, beio); 333 } 334 335 static void 336 ctl_complete_beio(struct ctl_be_block_io *beio) 337 { 338 union ctl_io *io = beio->io; 339 340 if (beio->beio_cont != NULL) { 341 beio->beio_cont(beio); 342 } else { 343 ctl_free_beio(beio); 344 ctl_data_submit_done(io); 345 } 346 } 347 348 static int 349 ctl_be_block_move_done(union ctl_io *io) 350 { 351 struct ctl_be_block_io *beio; 352 struct ctl_be_block_lun *be_lun; 353 struct ctl_lba_len_flags *lbalen; 354 #ifdef CTL_TIME_IO 355 struct bintime cur_bt; 356 #endif 357 int i; 358 359 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 360 be_lun = beio->lun; 361 362 DPRINTF("entered\n"); 363 364 #ifdef CTL_TIME_IO 365 getbintime(&cur_bt); 366 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 367 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 368 io->io_hdr.num_dmas++; 369 #endif 370 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 371 372 /* 373 * We set status at this point for read commands, and write 374 * commands with errors. 375 */ 376 if ((io->io_hdr.port_status == 0) && 377 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 378 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 379 lbalen = ARGS(beio->io); 380 if (lbalen->flags & CTL_LLF_READ) { 381 ctl_set_success(&io->scsiio); 382 } else if (lbalen->flags & CTL_LLF_COMPARE) { 383 /* We have two data blocks ready for comparison. */ 384 for (i = 0; i < beio->num_segs; i++) { 385 if (memcmp(beio->sg_segs[i].addr, 386 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 387 beio->sg_segs[i].len) != 0) 388 break; 389 } 390 if (i < beio->num_segs) 391 ctl_set_sense(&io->scsiio, 392 /*current_error*/ 1, 393 /*sense_key*/ SSD_KEY_MISCOMPARE, 394 /*asc*/ 0x1D, 395 /*ascq*/ 0x00, 396 SSD_ELEM_NONE); 397 else 398 ctl_set_success(&io->scsiio); 399 } 400 } 401 else if ((io->io_hdr.port_status != 0) 402 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 403 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 404 /* 405 * For hardware error sense keys, the sense key 406 * specific value is defined to be a retry count, 407 * but we use it to pass back an internal FETD 408 * error code. XXX KDM Hopefully the FETD is only 409 * using 16 bits for an error code, since that's 410 * all the space we have in the sks field. 411 */ 412 ctl_set_internal_failure(&io->scsiio, 413 /*sks_valid*/ 1, 414 /*retry_count*/ 415 io->io_hdr.port_status); 416 } 417 418 /* 419 * If this is a read, or a write with errors, it is done. 420 */ 421 if ((beio->bio_cmd == BIO_READ) 422 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 423 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 424 ctl_complete_beio(beio); 425 return (0); 426 } 427 428 /* 429 * At this point, we have a write and the DMA completed 430 * successfully. We now have to queue it to the task queue to 431 * execute the backend I/O. That is because we do blocking 432 * memory allocations, and in the file backing case, blocking I/O. 433 * This move done routine is generally called in the SIM's 434 * interrupt context, and therefore we cannot block. 435 */ 436 mtx_lock(&be_lun->queue_lock); 437 /* 438 * XXX KDM make sure that links is okay to use at this point. 439 * Otherwise, we either need to add another field to ctl_io_hdr, 440 * or deal with resource allocation here. 441 */ 442 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 443 mtx_unlock(&be_lun->queue_lock); 444 445 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 446 447 return (0); 448 } 449 450 static void 451 ctl_be_block_biodone(struct bio *bio) 452 { 453 struct ctl_be_block_io *beio; 454 struct ctl_be_block_lun *be_lun; 455 union ctl_io *io; 456 int error; 457 458 beio = bio->bio_caller1; 459 be_lun = beio->lun; 460 io = beio->io; 461 462 DPRINTF("entered\n"); 463 464 error = bio->bio_error; 465 mtx_lock(&be_lun->io_lock); 466 if (error != 0) 467 beio->num_errors++; 468 469 beio->num_bios_done++; 470 471 /* 472 * XXX KDM will this cause WITNESS to complain? Holding a lock 473 * during the free might cause it to complain. 474 */ 475 g_destroy_bio(bio); 476 477 /* 478 * If the send complete bit isn't set, or we aren't the last I/O to 479 * complete, then we're done. 480 */ 481 if ((beio->send_complete == 0) 482 || (beio->num_bios_done < beio->num_bios_sent)) { 483 mtx_unlock(&be_lun->io_lock); 484 return; 485 } 486 487 /* 488 * At this point, we've verified that we are the last I/O to 489 * complete, so it's safe to drop the lock. 490 */ 491 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 492 beio->ds_tag_type, beio->ds_trans_type, 493 /*now*/ NULL, /*then*/&beio->ds_t0); 494 mtx_unlock(&be_lun->io_lock); 495 496 /* 497 * If there are any errors from the backing device, we fail the 498 * entire I/O with a medium error. 499 */ 500 if (beio->num_errors > 0) { 501 if (error == EOPNOTSUPP) { 502 ctl_set_invalid_opcode(&io->scsiio); 503 } else if (beio->bio_cmd == BIO_FLUSH) { 504 /* XXX KDM is there is a better error here? */ 505 ctl_set_internal_failure(&io->scsiio, 506 /*sks_valid*/ 1, 507 /*retry_count*/ 0xbad2); 508 } else 509 ctl_set_medium_error(&io->scsiio); 510 ctl_complete_beio(beio); 511 return; 512 } 513 514 /* 515 * If this is a write, a flush, a delete or verify, we're all done. 516 * If this is a read, we can now send the data to the user. 517 */ 518 if ((beio->bio_cmd == BIO_WRITE) 519 || (beio->bio_cmd == BIO_FLUSH) 520 || (beio->bio_cmd == BIO_DELETE) 521 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 522 ctl_set_success(&io->scsiio); 523 ctl_complete_beio(beio); 524 } else { 525 #ifdef CTL_TIME_IO 526 getbintime(&io->io_hdr.dma_start_bt); 527 #endif 528 ctl_datamove(io); 529 } 530 } 531 532 static void 533 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 534 struct ctl_be_block_io *beio) 535 { 536 union ctl_io *io = beio->io; 537 struct mount *mountpoint; 538 int error, lock_flags; 539 540 DPRINTF("entered\n"); 541 542 binuptime(&beio->ds_t0); 543 mtx_lock(&be_lun->io_lock); 544 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 545 mtx_unlock(&be_lun->io_lock); 546 547 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 548 549 if (MNT_SHARED_WRITES(mountpoint) 550 || ((mountpoint == NULL) 551 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 552 lock_flags = LK_SHARED; 553 else 554 lock_flags = LK_EXCLUSIVE; 555 556 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 557 558 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 559 VOP_UNLOCK(be_lun->vn, 0); 560 561 vn_finished_write(mountpoint); 562 563 mtx_lock(&be_lun->io_lock); 564 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 565 beio->ds_tag_type, beio->ds_trans_type, 566 /*now*/ NULL, /*then*/&beio->ds_t0); 567 mtx_unlock(&be_lun->io_lock); 568 569 if (error == 0) 570 ctl_set_success(&io->scsiio); 571 else { 572 /* XXX KDM is there is a better error here? */ 573 ctl_set_internal_failure(&io->scsiio, 574 /*sks_valid*/ 1, 575 /*retry_count*/ 0xbad1); 576 } 577 578 ctl_complete_beio(beio); 579 } 580 581 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 582 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 583 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 584 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 585 586 static void 587 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 588 struct ctl_be_block_io *beio) 589 { 590 struct ctl_be_block_filedata *file_data; 591 union ctl_io *io; 592 struct uio xuio; 593 struct iovec *xiovec; 594 int flags; 595 int error, i; 596 597 DPRINTF("entered\n"); 598 599 file_data = &be_lun->backend.file; 600 io = beio->io; 601 flags = 0; 602 if (ARGS(io)->flags & CTL_LLF_DPO) 603 flags |= IO_DIRECT; 604 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 605 flags |= IO_SYNC; 606 607 bzero(&xuio, sizeof(xuio)); 608 if (beio->bio_cmd == BIO_READ) { 609 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 610 xuio.uio_rw = UIO_READ; 611 } else { 612 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 613 xuio.uio_rw = UIO_WRITE; 614 } 615 xuio.uio_offset = beio->io_offset; 616 xuio.uio_resid = beio->io_len; 617 xuio.uio_segflg = UIO_SYSSPACE; 618 xuio.uio_iov = beio->xiovecs; 619 xuio.uio_iovcnt = beio->num_segs; 620 xuio.uio_td = curthread; 621 622 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 623 xiovec->iov_base = beio->sg_segs[i].addr; 624 xiovec->iov_len = beio->sg_segs[i].len; 625 } 626 627 binuptime(&beio->ds_t0); 628 mtx_lock(&be_lun->io_lock); 629 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 630 mtx_unlock(&be_lun->io_lock); 631 632 if (beio->bio_cmd == BIO_READ) { 633 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 634 635 /* 636 * UFS pays attention to IO_DIRECT for reads. If the 637 * DIRECTIO option is configured into the kernel, it calls 638 * ffs_rawread(). But that only works for single-segment 639 * uios with user space addresses. In our case, with a 640 * kernel uio, it still reads into the buffer cache, but it 641 * will just try to release the buffer from the cache later 642 * on in ffs_read(). 643 * 644 * ZFS does not pay attention to IO_DIRECT for reads. 645 * 646 * UFS does not pay attention to IO_SYNC for reads. 647 * 648 * ZFS pays attention to IO_SYNC (which translates into the 649 * Solaris define FRSYNC for zfs_read()) for reads. It 650 * attempts to sync the file before reading. 651 * 652 * So, to attempt to provide some barrier semantics in the 653 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 654 */ 655 error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); 656 657 VOP_UNLOCK(be_lun->vn, 0); 658 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 659 } else { 660 struct mount *mountpoint; 661 int lock_flags; 662 663 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 664 665 if (MNT_SHARED_WRITES(mountpoint) 666 || ((mountpoint == NULL) 667 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 668 lock_flags = LK_SHARED; 669 else 670 lock_flags = LK_EXCLUSIVE; 671 672 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 673 674 /* 675 * UFS pays attention to IO_DIRECT for writes. The write 676 * is done asynchronously. (Normally the write would just 677 * get put into cache. 678 * 679 * UFS pays attention to IO_SYNC for writes. It will 680 * attempt to write the buffer out synchronously if that 681 * flag is set. 682 * 683 * ZFS does not pay attention to IO_DIRECT for writes. 684 * 685 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 686 * for writes. It will flush the transaction from the 687 * cache before returning. 688 * 689 * So if we've got the BIO_ORDERED flag set, we want 690 * IO_SYNC in either the UFS or ZFS case. 691 */ 692 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); 693 VOP_UNLOCK(be_lun->vn, 0); 694 695 vn_finished_write(mountpoint); 696 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 697 } 698 699 mtx_lock(&be_lun->io_lock); 700 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 701 beio->ds_tag_type, beio->ds_trans_type, 702 /*now*/ NULL, /*then*/&beio->ds_t0); 703 mtx_unlock(&be_lun->io_lock); 704 705 /* 706 * If we got an error, set the sense data to "MEDIUM ERROR" and 707 * return the I/O to the user. 708 */ 709 if (error != 0) { 710 char path_str[32]; 711 712 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 713 /* 714 * XXX KDM ZFS returns ENOSPC when the underlying 715 * filesystem fills up. What kind of SCSI error should we 716 * return for that? 717 */ 718 printf("%s%s command returned errno %d\n", path_str, 719 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 720 ctl_set_medium_error(&io->scsiio); 721 ctl_complete_beio(beio); 722 return; 723 } 724 725 /* 726 * If this is a write or a verify, we're all done. 727 * If this is a read, we can now send the data to the user. 728 */ 729 if ((beio->bio_cmd == BIO_WRITE) || 730 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 731 ctl_set_success(&io->scsiio); 732 ctl_complete_beio(beio); 733 } else { 734 #ifdef CTL_TIME_IO 735 getbintime(&io->io_hdr.dma_start_bt); 736 #endif 737 ctl_datamove(io); 738 } 739 } 740 741 static void 742 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 743 struct ctl_be_block_io *beio) 744 { 745 struct ctl_be_block_devdata *dev_data; 746 union ctl_io *io; 747 struct uio xuio; 748 struct iovec *xiovec; 749 int flags; 750 int error, i; 751 752 DPRINTF("entered\n"); 753 754 dev_data = &be_lun->backend.dev; 755 io = beio->io; 756 flags = 0; 757 if (ARGS(io)->flags & CTL_LLF_DPO) 758 flags |= IO_DIRECT; 759 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 760 flags |= IO_SYNC; 761 762 bzero(&xuio, sizeof(xuio)); 763 if (beio->bio_cmd == BIO_READ) { 764 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 765 xuio.uio_rw = UIO_READ; 766 } else { 767 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 768 xuio.uio_rw = UIO_WRITE; 769 } 770 xuio.uio_offset = beio->io_offset; 771 xuio.uio_resid = beio->io_len; 772 xuio.uio_segflg = UIO_SYSSPACE; 773 xuio.uio_iov = beio->xiovecs; 774 xuio.uio_iovcnt = beio->num_segs; 775 xuio.uio_td = curthread; 776 777 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 778 xiovec->iov_base = beio->sg_segs[i].addr; 779 xiovec->iov_len = beio->sg_segs[i].len; 780 } 781 782 binuptime(&beio->ds_t0); 783 mtx_lock(&be_lun->io_lock); 784 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 785 mtx_unlock(&be_lun->io_lock); 786 787 if (beio->bio_cmd == BIO_READ) { 788 error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags); 789 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 790 } else { 791 error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags); 792 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 793 } 794 795 mtx_lock(&be_lun->io_lock); 796 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 797 beio->ds_tag_type, beio->ds_trans_type, 798 /*now*/ NULL, /*then*/&beio->ds_t0); 799 mtx_unlock(&be_lun->io_lock); 800 801 /* 802 * If we got an error, set the sense data to "MEDIUM ERROR" and 803 * return the I/O to the user. 804 */ 805 if (error != 0) { 806 ctl_set_medium_error(&io->scsiio); 807 ctl_complete_beio(beio); 808 return; 809 } 810 811 /* 812 * If this is a write or a verify, we're all done. 813 * If this is a read, we can now send the data to the user. 814 */ 815 if ((beio->bio_cmd == BIO_WRITE) || 816 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 817 ctl_set_success(&io->scsiio); 818 ctl_complete_beio(beio); 819 } else { 820 #ifdef CTL_TIME_IO 821 getbintime(&io->io_hdr.dma_start_bt); 822 #endif 823 ctl_datamove(io); 824 } 825 } 826 827 static void 828 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 829 struct ctl_be_block_io *beio) 830 { 831 struct bio *bio; 832 union ctl_io *io; 833 struct ctl_be_block_devdata *dev_data; 834 835 dev_data = &be_lun->backend.dev; 836 io = beio->io; 837 838 DPRINTF("entered\n"); 839 840 /* This can't fail, it's a blocking allocation. */ 841 bio = g_alloc_bio(); 842 843 bio->bio_cmd = BIO_FLUSH; 844 bio->bio_flags |= BIO_ORDERED; 845 bio->bio_dev = dev_data->cdev; 846 bio->bio_offset = 0; 847 bio->bio_data = 0; 848 bio->bio_done = ctl_be_block_biodone; 849 bio->bio_caller1 = beio; 850 bio->bio_pblkno = 0; 851 852 /* 853 * We don't need to acquire the LUN lock here, because we are only 854 * sending one bio, and so there is no other context to synchronize 855 * with. 856 */ 857 beio->num_bios_sent = 1; 858 beio->send_complete = 1; 859 860 binuptime(&beio->ds_t0); 861 mtx_lock(&be_lun->io_lock); 862 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 863 mtx_unlock(&be_lun->io_lock); 864 865 (*dev_data->csw->d_strategy)(bio); 866 } 867 868 static void 869 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 870 struct ctl_be_block_io *beio, 871 uint64_t off, uint64_t len, int last) 872 { 873 struct bio *bio; 874 struct ctl_be_block_devdata *dev_data; 875 uint64_t maxlen; 876 877 dev_data = &be_lun->backend.dev; 878 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 879 while (len > 0) { 880 bio = g_alloc_bio(); 881 bio->bio_cmd = BIO_DELETE; 882 bio->bio_dev = dev_data->cdev; 883 bio->bio_offset = off; 884 bio->bio_length = MIN(len, maxlen); 885 bio->bio_data = 0; 886 bio->bio_done = ctl_be_block_biodone; 887 bio->bio_caller1 = beio; 888 bio->bio_pblkno = off / be_lun->blocksize; 889 890 off += bio->bio_length; 891 len -= bio->bio_length; 892 893 mtx_lock(&be_lun->io_lock); 894 beio->num_bios_sent++; 895 if (last && len == 0) 896 beio->send_complete = 1; 897 mtx_unlock(&be_lun->io_lock); 898 899 (*dev_data->csw->d_strategy)(bio); 900 } 901 } 902 903 static void 904 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 905 struct ctl_be_block_io *beio) 906 { 907 union ctl_io *io; 908 struct ctl_be_block_devdata *dev_data; 909 struct ctl_ptr_len_flags *ptrlen; 910 struct scsi_unmap_desc *buf, *end; 911 uint64_t len; 912 913 dev_data = &be_lun->backend.dev; 914 io = beio->io; 915 916 DPRINTF("entered\n"); 917 918 binuptime(&beio->ds_t0); 919 mtx_lock(&be_lun->io_lock); 920 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 921 mtx_unlock(&be_lun->io_lock); 922 923 if (beio->io_offset == -1) { 924 beio->io_len = 0; 925 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 926 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 927 end = buf + ptrlen->len / sizeof(*buf); 928 for (; buf < end; buf++) { 929 len = (uint64_t)scsi_4btoul(buf->length) * 930 be_lun->blocksize; 931 beio->io_len += len; 932 ctl_be_block_unmap_dev_range(be_lun, beio, 933 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 934 (end - buf < 2) ? TRUE : FALSE); 935 } 936 } else 937 ctl_be_block_unmap_dev_range(be_lun, beio, 938 beio->io_offset, beio->io_len, TRUE); 939 } 940 941 static void 942 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 943 struct ctl_be_block_io *beio) 944 { 945 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 946 int i; 947 struct bio *bio; 948 struct ctl_be_block_devdata *dev_data; 949 off_t cur_offset; 950 int max_iosize; 951 952 DPRINTF("entered\n"); 953 954 dev_data = &be_lun->backend.dev; 955 956 /* 957 * We have to limit our I/O size to the maximum supported by the 958 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 959 * set it properly, use DFLTPHYS. 960 */ 961 max_iosize = dev_data->cdev->si_iosize_max; 962 if (max_iosize < PAGE_SIZE) 963 max_iosize = DFLTPHYS; 964 965 cur_offset = beio->io_offset; 966 for (i = 0; i < beio->num_segs; i++) { 967 size_t cur_size; 968 uint8_t *cur_ptr; 969 970 cur_size = beio->sg_segs[i].len; 971 cur_ptr = beio->sg_segs[i].addr; 972 973 while (cur_size > 0) { 974 /* This can't fail, it's a blocking allocation. */ 975 bio = g_alloc_bio(); 976 977 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 978 979 bio->bio_cmd = beio->bio_cmd; 980 bio->bio_dev = dev_data->cdev; 981 bio->bio_caller1 = beio; 982 bio->bio_length = min(cur_size, max_iosize); 983 bio->bio_offset = cur_offset; 984 bio->bio_data = cur_ptr; 985 bio->bio_done = ctl_be_block_biodone; 986 bio->bio_pblkno = cur_offset / be_lun->blocksize; 987 988 cur_offset += bio->bio_length; 989 cur_ptr += bio->bio_length; 990 cur_size -= bio->bio_length; 991 992 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 993 beio->num_bios_sent++; 994 } 995 } 996 binuptime(&beio->ds_t0); 997 mtx_lock(&be_lun->io_lock); 998 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 999 beio->send_complete = 1; 1000 mtx_unlock(&be_lun->io_lock); 1001 1002 /* 1003 * Fire off all allocated requests! 1004 */ 1005 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1006 TAILQ_REMOVE(&queue, bio, bio_queue); 1007 (*dev_data->csw->d_strategy)(bio); 1008 } 1009 } 1010 1011 static void 1012 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1013 { 1014 union ctl_io *io; 1015 1016 io = beio->io; 1017 ctl_free_beio(beio); 1018 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1019 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1020 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1021 ctl_config_write_done(io); 1022 return; 1023 } 1024 1025 ctl_be_block_config_write(io); 1026 } 1027 1028 static void 1029 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1030 union ctl_io *io) 1031 { 1032 struct ctl_be_block_io *beio; 1033 struct ctl_be_block_softc *softc; 1034 struct ctl_lba_len_flags *lbalen; 1035 uint64_t len_left, lba; 1036 int i, seglen; 1037 uint8_t *buf, *end; 1038 1039 DPRINTF("entered\n"); 1040 1041 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1042 softc = be_lun->softc; 1043 lbalen = ARGS(beio->io); 1044 1045 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR) || 1046 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1047 ctl_free_beio(beio); 1048 ctl_set_invalid_field(&io->scsiio, 1049 /*sks_valid*/ 1, 1050 /*command*/ 1, 1051 /*field*/ 1, 1052 /*bit_valid*/ 0, 1053 /*bit*/ 0); 1054 ctl_config_write_done(io); 1055 return; 1056 } 1057 1058 switch (io->scsiio.tag_type) { 1059 case CTL_TAG_ORDERED: 1060 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1061 break; 1062 case CTL_TAG_HEAD_OF_QUEUE: 1063 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1064 break; 1065 case CTL_TAG_UNTAGGED: 1066 case CTL_TAG_SIMPLE: 1067 case CTL_TAG_ACA: 1068 default: 1069 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1070 break; 1071 } 1072 1073 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1074 beio->io_offset = lbalen->lba * be_lun->blocksize; 1075 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1076 beio->bio_cmd = BIO_DELETE; 1077 beio->ds_trans_type = DEVSTAT_FREE; 1078 1079 be_lun->unmap(be_lun, beio); 1080 return; 1081 } 1082 1083 beio->bio_cmd = BIO_WRITE; 1084 beio->ds_trans_type = DEVSTAT_WRITE; 1085 1086 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1087 (uintmax_t)lbalen->lba, lbalen->len); 1088 1089 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1090 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1091 1092 /* 1093 * Setup the S/G entry for this chunk. 1094 */ 1095 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1096 seglen -= seglen % be_lun->blocksize; 1097 beio->sg_segs[i].len = seglen; 1098 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1099 1100 DPRINTF("segment %d addr %p len %zd\n", i, 1101 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1102 1103 beio->num_segs++; 1104 len_left -= seglen; 1105 1106 buf = beio->sg_segs[i].addr; 1107 end = buf + seglen; 1108 for (; buf < end; buf += be_lun->blocksize) { 1109 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1110 if (lbalen->flags & SWS_LBDATA) 1111 scsi_ulto4b(lbalen->lba + lba, buf); 1112 lba++; 1113 } 1114 } 1115 1116 beio->io_offset = lbalen->lba * be_lun->blocksize; 1117 beio->io_len = lba * be_lun->blocksize; 1118 1119 /* We can not do all in one run. Correct and schedule rerun. */ 1120 if (len_left > 0) { 1121 lbalen->lba += lba; 1122 lbalen->len -= lba; 1123 beio->beio_cont = ctl_be_block_cw_done_ws; 1124 } 1125 1126 be_lun->dispatch(be_lun, beio); 1127 } 1128 1129 static void 1130 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1131 union ctl_io *io) 1132 { 1133 struct ctl_be_block_io *beio; 1134 struct ctl_be_block_softc *softc; 1135 struct ctl_ptr_len_flags *ptrlen; 1136 1137 DPRINTF("entered\n"); 1138 1139 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1140 softc = be_lun->softc; 1141 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1142 1143 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1144 ctl_free_beio(beio); 1145 ctl_set_invalid_field(&io->scsiio, 1146 /*sks_valid*/ 0, 1147 /*command*/ 1, 1148 /*field*/ 0, 1149 /*bit_valid*/ 0, 1150 /*bit*/ 0); 1151 ctl_config_write_done(io); 1152 return; 1153 } 1154 1155 switch (io->scsiio.tag_type) { 1156 case CTL_TAG_ORDERED: 1157 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1158 break; 1159 case CTL_TAG_HEAD_OF_QUEUE: 1160 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1161 break; 1162 case CTL_TAG_UNTAGGED: 1163 case CTL_TAG_SIMPLE: 1164 case CTL_TAG_ACA: 1165 default: 1166 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1167 break; 1168 } 1169 1170 beio->io_len = 0; 1171 beio->io_offset = -1; 1172 1173 beio->bio_cmd = BIO_DELETE; 1174 beio->ds_trans_type = DEVSTAT_FREE; 1175 1176 DPRINTF("UNMAP\n"); 1177 1178 be_lun->unmap(be_lun, beio); 1179 } 1180 1181 static void 1182 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1183 { 1184 union ctl_io *io; 1185 1186 io = beio->io; 1187 ctl_free_beio(beio); 1188 ctl_config_write_done(io); 1189 } 1190 1191 static void 1192 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1193 union ctl_io *io) 1194 { 1195 struct ctl_be_block_io *beio; 1196 struct ctl_be_block_softc *softc; 1197 1198 DPRINTF("entered\n"); 1199 1200 softc = be_lun->softc; 1201 beio = ctl_alloc_beio(softc); 1202 beio->io = io; 1203 beio->lun = be_lun; 1204 beio->beio_cont = ctl_be_block_cw_done; 1205 PRIV(io)->ptr = (void *)beio; 1206 1207 switch (io->scsiio.cdb[0]) { 1208 case SYNCHRONIZE_CACHE: 1209 case SYNCHRONIZE_CACHE_16: 1210 beio->bio_cmd = BIO_FLUSH; 1211 beio->ds_trans_type = DEVSTAT_NO_DATA; 1212 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1213 beio->io_len = 0; 1214 be_lun->lun_flush(be_lun, beio); 1215 break; 1216 case WRITE_SAME_10: 1217 case WRITE_SAME_16: 1218 ctl_be_block_cw_dispatch_ws(be_lun, io); 1219 break; 1220 case UNMAP: 1221 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1222 break; 1223 default: 1224 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1225 break; 1226 } 1227 } 1228 1229 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1230 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1231 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1232 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1233 1234 static void 1235 ctl_be_block_next(struct ctl_be_block_io *beio) 1236 { 1237 struct ctl_be_block_lun *be_lun; 1238 union ctl_io *io; 1239 1240 io = beio->io; 1241 be_lun = beio->lun; 1242 ctl_free_beio(beio); 1243 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1244 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1245 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1246 ctl_data_submit_done(io); 1247 return; 1248 } 1249 1250 io->io_hdr.status &= ~CTL_STATUS_MASK; 1251 io->io_hdr.status |= CTL_STATUS_NONE; 1252 1253 mtx_lock(&be_lun->queue_lock); 1254 /* 1255 * XXX KDM make sure that links is okay to use at this point. 1256 * Otherwise, we either need to add another field to ctl_io_hdr, 1257 * or deal with resource allocation here. 1258 */ 1259 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1260 mtx_unlock(&be_lun->queue_lock); 1261 1262 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1263 } 1264 1265 static void 1266 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1267 union ctl_io *io) 1268 { 1269 struct ctl_be_block_io *beio; 1270 struct ctl_be_block_softc *softc; 1271 struct ctl_lba_len_flags *lbalen; 1272 struct ctl_ptr_len_flags *bptrlen; 1273 uint64_t len_left, lbas; 1274 int i; 1275 1276 softc = be_lun->softc; 1277 1278 DPRINTF("entered\n"); 1279 1280 lbalen = ARGS(io); 1281 if (lbalen->flags & CTL_LLF_WRITE) { 1282 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1283 } else { 1284 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1285 } 1286 1287 beio = ctl_alloc_beio(softc); 1288 beio->io = io; 1289 beio->lun = be_lun; 1290 bptrlen = PRIV(io); 1291 bptrlen->ptr = (void *)beio; 1292 1293 switch (io->scsiio.tag_type) { 1294 case CTL_TAG_ORDERED: 1295 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1296 break; 1297 case CTL_TAG_HEAD_OF_QUEUE: 1298 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1299 break; 1300 case CTL_TAG_UNTAGGED: 1301 case CTL_TAG_SIMPLE: 1302 case CTL_TAG_ACA: 1303 default: 1304 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1305 break; 1306 } 1307 1308 if (lbalen->flags & CTL_LLF_WRITE) { 1309 beio->bio_cmd = BIO_WRITE; 1310 beio->ds_trans_type = DEVSTAT_WRITE; 1311 } else { 1312 beio->bio_cmd = BIO_READ; 1313 beio->ds_trans_type = DEVSTAT_READ; 1314 } 1315 1316 DPRINTF("%s at LBA %jx len %u @%ju\n", 1317 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1318 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1319 if (lbalen->flags & CTL_LLF_COMPARE) 1320 lbas = CTLBLK_HALF_IO_SIZE; 1321 else 1322 lbas = CTLBLK_MAX_IO_SIZE; 1323 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1324 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1325 beio->io_len = lbas * be_lun->blocksize; 1326 bptrlen->len += lbas; 1327 1328 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1329 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1330 i, CTLBLK_MAX_SEGS)); 1331 1332 /* 1333 * Setup the S/G entry for this chunk. 1334 */ 1335 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1336 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1337 1338 DPRINTF("segment %d addr %p len %zd\n", i, 1339 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1340 1341 /* Set up second segment for compare operation. */ 1342 if (lbalen->flags & CTL_LLF_COMPARE) { 1343 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1344 beio->sg_segs[i].len; 1345 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1346 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1347 } 1348 1349 beio->num_segs++; 1350 len_left -= beio->sg_segs[i].len; 1351 } 1352 if (bptrlen->len < lbalen->len) 1353 beio->beio_cont = ctl_be_block_next; 1354 io->scsiio.be_move_done = ctl_be_block_move_done; 1355 /* For compare we have separate S/G lists for read and datamove. */ 1356 if (lbalen->flags & CTL_LLF_COMPARE) 1357 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1358 else 1359 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1360 io->scsiio.kern_data_len = beio->io_len; 1361 io->scsiio.kern_data_resid = 0; 1362 io->scsiio.kern_sg_entries = beio->num_segs; 1363 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1364 1365 /* 1366 * For the read case, we need to read the data into our buffers and 1367 * then we can send it back to the user. For the write case, we 1368 * need to get the data from the user first. 1369 */ 1370 if (beio->bio_cmd == BIO_READ) { 1371 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1372 be_lun->dispatch(be_lun, beio); 1373 } else { 1374 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1375 #ifdef CTL_TIME_IO 1376 getbintime(&io->io_hdr.dma_start_bt); 1377 #endif 1378 ctl_datamove(io); 1379 } 1380 } 1381 1382 static void 1383 ctl_be_block_worker(void *context, int pending) 1384 { 1385 struct ctl_be_block_lun *be_lun; 1386 struct ctl_be_block_softc *softc; 1387 union ctl_io *io; 1388 1389 be_lun = (struct ctl_be_block_lun *)context; 1390 softc = be_lun->softc; 1391 1392 DPRINTF("entered\n"); 1393 1394 mtx_lock(&be_lun->queue_lock); 1395 for (;;) { 1396 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1397 if (io != NULL) { 1398 struct ctl_be_block_io *beio; 1399 1400 DPRINTF("datamove queue\n"); 1401 1402 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1403 ctl_io_hdr, links); 1404 1405 mtx_unlock(&be_lun->queue_lock); 1406 1407 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1408 1409 be_lun->dispatch(be_lun, beio); 1410 1411 mtx_lock(&be_lun->queue_lock); 1412 continue; 1413 } 1414 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1415 if (io != NULL) { 1416 1417 DPRINTF("config write queue\n"); 1418 1419 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1420 ctl_io_hdr, links); 1421 1422 mtx_unlock(&be_lun->queue_lock); 1423 1424 ctl_be_block_cw_dispatch(be_lun, io); 1425 1426 mtx_lock(&be_lun->queue_lock); 1427 continue; 1428 } 1429 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1430 if (io != NULL) { 1431 DPRINTF("input queue\n"); 1432 1433 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1434 ctl_io_hdr, links); 1435 mtx_unlock(&be_lun->queue_lock); 1436 1437 /* 1438 * We must drop the lock, since this routine and 1439 * its children may sleep. 1440 */ 1441 ctl_be_block_dispatch(be_lun, io); 1442 1443 mtx_lock(&be_lun->queue_lock); 1444 continue; 1445 } 1446 1447 /* 1448 * If we get here, there is no work left in the queues, so 1449 * just break out and let the task queue go to sleep. 1450 */ 1451 break; 1452 } 1453 mtx_unlock(&be_lun->queue_lock); 1454 } 1455 1456 /* 1457 * Entry point from CTL to the backend for I/O. We queue everything to a 1458 * work thread, so this just puts the I/O on a queue and wakes up the 1459 * thread. 1460 */ 1461 static int 1462 ctl_be_block_submit(union ctl_io *io) 1463 { 1464 struct ctl_be_block_lun *be_lun; 1465 struct ctl_be_lun *ctl_be_lun; 1466 1467 DPRINTF("entered\n"); 1468 1469 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1470 CTL_PRIV_BACKEND_LUN].ptr; 1471 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1472 1473 /* 1474 * Make sure we only get SCSI I/O. 1475 */ 1476 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1477 "%#x) encountered", io->io_hdr.io_type)); 1478 1479 PRIV(io)->len = 0; 1480 1481 mtx_lock(&be_lun->queue_lock); 1482 /* 1483 * XXX KDM make sure that links is okay to use at this point. 1484 * Otherwise, we either need to add another field to ctl_io_hdr, 1485 * or deal with resource allocation here. 1486 */ 1487 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1488 mtx_unlock(&be_lun->queue_lock); 1489 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1490 1491 return (CTL_RETVAL_COMPLETE); 1492 } 1493 1494 static int 1495 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1496 int flag, struct thread *td) 1497 { 1498 struct ctl_be_block_softc *softc; 1499 int error; 1500 1501 softc = &backend_block_softc; 1502 1503 error = 0; 1504 1505 switch (cmd) { 1506 case CTL_LUN_REQ: { 1507 struct ctl_lun_req *lun_req; 1508 1509 lun_req = (struct ctl_lun_req *)addr; 1510 1511 switch (lun_req->reqtype) { 1512 case CTL_LUNREQ_CREATE: 1513 error = ctl_be_block_create(softc, lun_req); 1514 break; 1515 case CTL_LUNREQ_RM: 1516 error = ctl_be_block_rm(softc, lun_req); 1517 break; 1518 case CTL_LUNREQ_MODIFY: 1519 error = ctl_be_block_modify(softc, lun_req); 1520 break; 1521 default: 1522 lun_req->status = CTL_LUN_ERROR; 1523 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1524 "%s: invalid LUN request type %d", __func__, 1525 lun_req->reqtype); 1526 break; 1527 } 1528 break; 1529 } 1530 default: 1531 error = ENOTTY; 1532 break; 1533 } 1534 1535 return (error); 1536 } 1537 1538 static int 1539 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1540 { 1541 struct ctl_be_block_filedata *file_data; 1542 struct ctl_lun_create_params *params; 1543 struct vattr vattr; 1544 int error; 1545 1546 error = 0; 1547 file_data = &be_lun->backend.file; 1548 params = &req->reqdata.create; 1549 1550 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1551 be_lun->dispatch = ctl_be_block_dispatch_file; 1552 be_lun->lun_flush = ctl_be_block_flush_file; 1553 1554 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1555 if (error != 0) { 1556 snprintf(req->error_str, sizeof(req->error_str), 1557 "error calling VOP_GETATTR() for file %s", 1558 be_lun->dev_path); 1559 return (error); 1560 } 1561 1562 /* 1563 * Verify that we have the ability to upgrade to exclusive 1564 * access on this file so we can trap errors at open instead 1565 * of reporting them during first access. 1566 */ 1567 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1568 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1569 if (be_lun->vn->v_iflag & VI_DOOMED) { 1570 error = EBADF; 1571 snprintf(req->error_str, sizeof(req->error_str), 1572 "error locking file %s", be_lun->dev_path); 1573 return (error); 1574 } 1575 } 1576 1577 1578 file_data->cred = crhold(curthread->td_ucred); 1579 if (params->lun_size_bytes != 0) 1580 be_lun->size_bytes = params->lun_size_bytes; 1581 else 1582 be_lun->size_bytes = vattr.va_size; 1583 /* 1584 * We set the multi thread flag for file operations because all 1585 * filesystems (in theory) are capable of allowing multiple readers 1586 * of a file at once. So we want to get the maximum possible 1587 * concurrency. 1588 */ 1589 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1590 1591 /* 1592 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1593 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1594 * with disklabel and UFS on FreeBSD at least. Large block sizes 1595 * may not work with other OSes as well. So just export a sector 1596 * size of 512 bytes, which should work with any OS or 1597 * application. Since our backing is a file, any block size will 1598 * work fine for the backing store. 1599 */ 1600 #if 0 1601 be_lun->blocksize= vattr.va_blocksize; 1602 #endif 1603 if (params->blocksize_bytes != 0) 1604 be_lun->blocksize = params->blocksize_bytes; 1605 else 1606 be_lun->blocksize = 512; 1607 1608 /* 1609 * Sanity check. The media size has to be at least one 1610 * sector long. 1611 */ 1612 if (be_lun->size_bytes < be_lun->blocksize) { 1613 error = EINVAL; 1614 snprintf(req->error_str, sizeof(req->error_str), 1615 "file %s size %ju < block size %u", be_lun->dev_path, 1616 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1617 } 1618 return (error); 1619 } 1620 1621 static int 1622 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1623 { 1624 struct ctl_lun_create_params *params; 1625 struct vattr vattr; 1626 struct cdev *dev; 1627 struct cdevsw *devsw; 1628 int error; 1629 off_t ps, pss, po, pos; 1630 1631 params = &req->reqdata.create; 1632 1633 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1634 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1635 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1636 &be_lun->backend.dev.dev_ref); 1637 if (be_lun->backend.dev.csw == NULL) 1638 panic("Unable to retrieve device switch"); 1639 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) 1640 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1641 else 1642 be_lun->dispatch = ctl_be_block_dispatch_dev; 1643 be_lun->lun_flush = ctl_be_block_flush_dev; 1644 be_lun->unmap = ctl_be_block_unmap_dev; 1645 1646 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1647 if (error) { 1648 snprintf(req->error_str, sizeof(req->error_str), 1649 "%s: error getting vnode attributes for device %s", 1650 __func__, be_lun->dev_path); 1651 return (error); 1652 } 1653 1654 dev = be_lun->vn->v_rdev; 1655 devsw = dev->si_devsw; 1656 if (!devsw->d_ioctl) { 1657 snprintf(req->error_str, sizeof(req->error_str), 1658 "%s: no d_ioctl for device %s!", __func__, 1659 be_lun->dev_path); 1660 return (ENODEV); 1661 } 1662 1663 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1664 (caddr_t)&be_lun->blocksize, FREAD, 1665 curthread); 1666 if (error) { 1667 snprintf(req->error_str, sizeof(req->error_str), 1668 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1669 "on %s!", __func__, error, be_lun->dev_path); 1670 return (error); 1671 } 1672 1673 /* 1674 * If the user has asked for a blocksize that is greater than the 1675 * backing device's blocksize, we can do it only if the blocksize 1676 * the user is asking for is an even multiple of the underlying 1677 * device's blocksize. 1678 */ 1679 if ((params->blocksize_bytes != 0) 1680 && (params->blocksize_bytes > be_lun->blocksize)) { 1681 uint32_t bs_multiple, tmp_blocksize; 1682 1683 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1684 1685 tmp_blocksize = bs_multiple * be_lun->blocksize; 1686 1687 if (tmp_blocksize == params->blocksize_bytes) { 1688 be_lun->blocksize = params->blocksize_bytes; 1689 } else { 1690 snprintf(req->error_str, sizeof(req->error_str), 1691 "%s: requested blocksize %u is not an even " 1692 "multiple of backing device blocksize %u", 1693 __func__, params->blocksize_bytes, 1694 be_lun->blocksize); 1695 return (EINVAL); 1696 1697 } 1698 } else if ((params->blocksize_bytes != 0) 1699 && (params->blocksize_bytes != be_lun->blocksize)) { 1700 snprintf(req->error_str, sizeof(req->error_str), 1701 "%s: requested blocksize %u < backing device " 1702 "blocksize %u", __func__, params->blocksize_bytes, 1703 be_lun->blocksize); 1704 return (EINVAL); 1705 } 1706 1707 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1708 (caddr_t)&be_lun->size_bytes, FREAD, 1709 curthread); 1710 if (error) { 1711 snprintf(req->error_str, sizeof(req->error_str), 1712 "%s: error %d returned for DIOCGMEDIASIZE " 1713 " ioctl on %s!", __func__, error, 1714 be_lun->dev_path); 1715 return (error); 1716 } 1717 1718 if (params->lun_size_bytes != 0) { 1719 if (params->lun_size_bytes > be_lun->size_bytes) { 1720 snprintf(req->error_str, sizeof(req->error_str), 1721 "%s: requested LUN size %ju > backing device " 1722 "size %ju", __func__, 1723 (uintmax_t)params->lun_size_bytes, 1724 (uintmax_t)be_lun->size_bytes); 1725 return (EINVAL); 1726 } 1727 1728 be_lun->size_bytes = params->lun_size_bytes; 1729 } 1730 1731 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1732 (caddr_t)&ps, FREAD, curthread); 1733 if (error) 1734 ps = po = 0; 1735 else { 1736 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1737 (caddr_t)&po, FREAD, curthread); 1738 if (error) 1739 po = 0; 1740 } 1741 pss = ps / be_lun->blocksize; 1742 pos = po / be_lun->blocksize; 1743 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1744 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1745 be_lun->pblockexp = fls(pss) - 1; 1746 be_lun->pblockoff = (pss - pos) % pss; 1747 } 1748 1749 return (0); 1750 } 1751 1752 static int 1753 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1754 { 1755 DROP_GIANT(); 1756 if (be_lun->vn) { 1757 int flags = FREAD | FWRITE; 1758 1759 switch (be_lun->dev_type) { 1760 case CTL_BE_BLOCK_DEV: 1761 if (be_lun->backend.dev.csw) { 1762 dev_relthread(be_lun->backend.dev.cdev, 1763 be_lun->backend.dev.dev_ref); 1764 be_lun->backend.dev.csw = NULL; 1765 be_lun->backend.dev.cdev = NULL; 1766 } 1767 break; 1768 case CTL_BE_BLOCK_FILE: 1769 break; 1770 case CTL_BE_BLOCK_NONE: 1771 break; 1772 default: 1773 panic("Unexpected backend type."); 1774 break; 1775 } 1776 1777 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1778 be_lun->vn = NULL; 1779 1780 switch (be_lun->dev_type) { 1781 case CTL_BE_BLOCK_DEV: 1782 break; 1783 case CTL_BE_BLOCK_FILE: 1784 if (be_lun->backend.file.cred != NULL) { 1785 crfree(be_lun->backend.file.cred); 1786 be_lun->backend.file.cred = NULL; 1787 } 1788 break; 1789 case CTL_BE_BLOCK_NONE: 1790 break; 1791 default: 1792 panic("Unexpected backend type."); 1793 break; 1794 } 1795 } 1796 PICKUP_GIANT(); 1797 1798 return (0); 1799 } 1800 1801 static int 1802 ctl_be_block_open(struct ctl_be_block_softc *softc, 1803 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1804 { 1805 struct nameidata nd; 1806 int flags; 1807 int error; 1808 1809 /* 1810 * XXX KDM allow a read-only option? 1811 */ 1812 flags = FREAD | FWRITE; 1813 error = 0; 1814 1815 if (rootvnode == NULL) { 1816 snprintf(req->error_str, sizeof(req->error_str), 1817 "%s: Root filesystem is not mounted", __func__); 1818 return (1); 1819 } 1820 1821 if (!curthread->td_proc->p_fd->fd_cdir) { 1822 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1823 VREF(rootvnode); 1824 } 1825 if (!curthread->td_proc->p_fd->fd_rdir) { 1826 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1827 VREF(rootvnode); 1828 } 1829 if (!curthread->td_proc->p_fd->fd_jdir) { 1830 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1831 VREF(rootvnode); 1832 } 1833 1834 again: 1835 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1836 error = vn_open(&nd, &flags, 0, NULL); 1837 if (error) { 1838 /* 1839 * This is the only reasonable guess we can make as far as 1840 * path if the user doesn't give us a fully qualified path. 1841 * If they want to specify a file, they need to specify the 1842 * full path. 1843 */ 1844 if (be_lun->dev_path[0] != '/') { 1845 char *dev_path = "/dev/"; 1846 char *dev_name; 1847 1848 /* Try adding device path at beginning of name */ 1849 dev_name = malloc(strlen(be_lun->dev_path) 1850 + strlen(dev_path) + 1, 1851 M_CTLBLK, M_WAITOK); 1852 if (dev_name) { 1853 sprintf(dev_name, "%s%s", dev_path, 1854 be_lun->dev_path); 1855 free(be_lun->dev_path, M_CTLBLK); 1856 be_lun->dev_path = dev_name; 1857 goto again; 1858 } 1859 } 1860 snprintf(req->error_str, sizeof(req->error_str), 1861 "%s: error opening %s", __func__, be_lun->dev_path); 1862 return (error); 1863 } 1864 1865 NDFREE(&nd, NDF_ONLY_PNBUF); 1866 1867 be_lun->vn = nd.ni_vp; 1868 1869 /* We only support disks and files. */ 1870 if (vn_isdisk(be_lun->vn, &error)) { 1871 error = ctl_be_block_open_dev(be_lun, req); 1872 } else if (be_lun->vn->v_type == VREG) { 1873 error = ctl_be_block_open_file(be_lun, req); 1874 } else { 1875 error = EINVAL; 1876 snprintf(req->error_str, sizeof(req->error_str), 1877 "%s is not a disk or plain file", be_lun->dev_path); 1878 } 1879 VOP_UNLOCK(be_lun->vn, 0); 1880 1881 if (error != 0) { 1882 ctl_be_block_close(be_lun); 1883 return (error); 1884 } 1885 1886 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1887 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1888 1889 return (0); 1890 } 1891 1892 static int 1893 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1894 { 1895 struct ctl_be_block_lun *be_lun; 1896 struct ctl_lun_create_params *params; 1897 char num_thread_str[16]; 1898 char tmpstr[32]; 1899 char *value; 1900 int retval, num_threads, unmap; 1901 int tmp_num_threads; 1902 1903 params = &req->reqdata.create; 1904 retval = 0; 1905 1906 num_threads = cbb_num_threads; 1907 1908 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1909 1910 be_lun->softc = softc; 1911 STAILQ_INIT(&be_lun->input_queue); 1912 STAILQ_INIT(&be_lun->config_write_queue); 1913 STAILQ_INIT(&be_lun->datamove_queue); 1914 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1915 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1916 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 1917 ctl_init_opts(&be_lun->ctl_be_lun.options, 1918 req->num_be_args, req->kern_be_args); 1919 1920 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1921 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1922 1923 if (be_lun->lun_zone == NULL) { 1924 snprintf(req->error_str, sizeof(req->error_str), 1925 "%s: error allocating UMA zone", __func__); 1926 goto bailout_error; 1927 } 1928 1929 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1930 be_lun->ctl_be_lun.lun_type = params->device_type; 1931 else 1932 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1933 1934 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1935 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file"); 1936 if (value == NULL) { 1937 snprintf(req->error_str, sizeof(req->error_str), 1938 "%s: no file argument specified", __func__); 1939 goto bailout_error; 1940 } 1941 be_lun->dev_path = strdup(value, M_CTLBLK); 1942 1943 retval = ctl_be_block_open(softc, be_lun, req); 1944 if (retval != 0) { 1945 retval = 0; 1946 goto bailout_error; 1947 } 1948 1949 /* 1950 * Tell the user the size of the file/device. 1951 */ 1952 params->lun_size_bytes = be_lun->size_bytes; 1953 1954 /* 1955 * The maximum LBA is the size - 1. 1956 */ 1957 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1958 } else { 1959 /* 1960 * For processor devices, we don't have any size. 1961 */ 1962 be_lun->blocksize = 0; 1963 be_lun->pblockexp = 0; 1964 be_lun->pblockoff = 0; 1965 be_lun->size_blocks = 0; 1966 be_lun->size_bytes = 0; 1967 be_lun->ctl_be_lun.maxlba = 0; 1968 params->lun_size_bytes = 0; 1969 1970 /* 1971 * Default to just 1 thread for processor devices. 1972 */ 1973 num_threads = 1; 1974 } 1975 1976 /* 1977 * XXX This searching loop might be refactored to be combined with 1978 * the loop above, 1979 */ 1980 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads"); 1981 if (value != NULL) { 1982 tmp_num_threads = strtol(value, NULL, 0); 1983 1984 /* 1985 * We don't let the user specify less than one 1986 * thread, but hope he's clueful enough not to 1987 * specify 1000 threads. 1988 */ 1989 if (tmp_num_threads < 1) { 1990 snprintf(req->error_str, sizeof(req->error_str), 1991 "%s: invalid number of threads %s", 1992 __func__, num_thread_str); 1993 goto bailout_error; 1994 } 1995 num_threads = tmp_num_threads; 1996 } 1997 unmap = 0; 1998 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap"); 1999 if (value != NULL && strcmp(value, "on") == 0) 2000 unmap = 1; 2001 2002 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 2003 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 2004 if (unmap) 2005 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 2006 be_lun->ctl_be_lun.be_lun = be_lun; 2007 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 2008 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 2009 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 2010 /* Tell the user the blocksize we ended up using */ 2011 params->blocksize_bytes = be_lun->blocksize; 2012 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2013 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 2014 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 2015 } else 2016 be_lun->ctl_be_lun.req_lun_id = 0; 2017 2018 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 2019 be_lun->ctl_be_lun.lun_config_status = 2020 ctl_be_block_lun_config_status; 2021 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 2022 2023 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2024 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 2025 softc->num_luns); 2026 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 2027 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2028 sizeof(tmpstr))); 2029 2030 /* Tell the user what we used for a serial number */ 2031 strncpy((char *)params->serial_num, tmpstr, 2032 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 2033 } else { 2034 strncpy((char *)be_lun->ctl_be_lun.serial_num, 2035 params->serial_num, 2036 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2037 sizeof(params->serial_num))); 2038 } 2039 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2040 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2041 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 2042 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2043 sizeof(tmpstr))); 2044 2045 /* Tell the user what we used for a device ID */ 2046 strncpy((char *)params->device_id, tmpstr, 2047 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2048 } else { 2049 strncpy((char *)be_lun->ctl_be_lun.device_id, 2050 params->device_id, 2051 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2052 sizeof(params->device_id))); 2053 } 2054 2055 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2056 2057 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2058 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2059 2060 if (be_lun->io_taskqueue == NULL) { 2061 snprintf(req->error_str, sizeof(req->error_str), 2062 "%s: Unable to create taskqueue", __func__); 2063 goto bailout_error; 2064 } 2065 2066 /* 2067 * Note that we start the same number of threads by default for 2068 * both the file case and the block device case. For the file 2069 * case, we need multiple threads to allow concurrency, because the 2070 * vnode interface is designed to be a blocking interface. For the 2071 * block device case, ZFS zvols at least will block the caller's 2072 * context in many instances, and so we need multiple threads to 2073 * overcome that problem. Other block devices don't need as many 2074 * threads, but they shouldn't cause too many problems. 2075 * 2076 * If the user wants to just have a single thread for a block 2077 * device, he can specify that when the LUN is created, or change 2078 * the tunable/sysctl to alter the default number of threads. 2079 */ 2080 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2081 /*num threads*/num_threads, 2082 /*priority*/PWAIT, 2083 /*thread name*/ 2084 "%s taskq", be_lun->lunname); 2085 2086 if (retval != 0) 2087 goto bailout_error; 2088 2089 be_lun->num_threads = num_threads; 2090 2091 mtx_lock(&softc->lock); 2092 softc->num_luns++; 2093 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2094 2095 mtx_unlock(&softc->lock); 2096 2097 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2098 if (retval != 0) { 2099 mtx_lock(&softc->lock); 2100 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2101 links); 2102 softc->num_luns--; 2103 mtx_unlock(&softc->lock); 2104 snprintf(req->error_str, sizeof(req->error_str), 2105 "%s: ctl_add_lun() returned error %d, see dmesg for " 2106 "details", __func__, retval); 2107 retval = 0; 2108 goto bailout_error; 2109 } 2110 2111 mtx_lock(&softc->lock); 2112 2113 /* 2114 * Tell the config_status routine that we're waiting so it won't 2115 * clean up the LUN in the event of an error. 2116 */ 2117 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2118 2119 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2120 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2121 if (retval == EINTR) 2122 break; 2123 } 2124 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2125 2126 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2127 snprintf(req->error_str, sizeof(req->error_str), 2128 "%s: LUN configuration error, see dmesg for details", 2129 __func__); 2130 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2131 links); 2132 softc->num_luns--; 2133 mtx_unlock(&softc->lock); 2134 goto bailout_error; 2135 } else { 2136 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2137 } 2138 2139 mtx_unlock(&softc->lock); 2140 2141 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2142 be_lun->blocksize, 2143 DEVSTAT_ALL_SUPPORTED, 2144 be_lun->ctl_be_lun.lun_type 2145 | DEVSTAT_TYPE_IF_OTHER, 2146 DEVSTAT_PRIORITY_OTHER); 2147 2148 2149 req->status = CTL_LUN_OK; 2150 2151 return (retval); 2152 2153 bailout_error: 2154 req->status = CTL_LUN_ERROR; 2155 2156 if (be_lun->io_taskqueue != NULL) 2157 taskqueue_free(be_lun->io_taskqueue); 2158 ctl_be_block_close(be_lun); 2159 if (be_lun->dev_path != NULL) 2160 free(be_lun->dev_path, M_CTLBLK); 2161 if (be_lun->lun_zone != NULL) 2162 uma_zdestroy(be_lun->lun_zone); 2163 ctl_free_opts(&be_lun->ctl_be_lun.options); 2164 mtx_destroy(&be_lun->queue_lock); 2165 mtx_destroy(&be_lun->io_lock); 2166 free(be_lun, M_CTLBLK); 2167 2168 return (retval); 2169 } 2170 2171 static int 2172 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2173 { 2174 struct ctl_lun_rm_params *params; 2175 struct ctl_be_block_lun *be_lun; 2176 int retval; 2177 2178 params = &req->reqdata.rm; 2179 2180 mtx_lock(&softc->lock); 2181 2182 be_lun = NULL; 2183 2184 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2185 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2186 break; 2187 } 2188 mtx_unlock(&softc->lock); 2189 2190 if (be_lun == NULL) { 2191 snprintf(req->error_str, sizeof(req->error_str), 2192 "%s: LUN %u is not managed by the block backend", 2193 __func__, params->lun_id); 2194 goto bailout_error; 2195 } 2196 2197 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2198 2199 if (retval != 0) { 2200 snprintf(req->error_str, sizeof(req->error_str), 2201 "%s: error %d returned from ctl_disable_lun() for " 2202 "LUN %d", __func__, retval, params->lun_id); 2203 goto bailout_error; 2204 2205 } 2206 2207 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2208 if (retval != 0) { 2209 snprintf(req->error_str, sizeof(req->error_str), 2210 "%s: error %d returned from ctl_invalidate_lun() for " 2211 "LUN %d", __func__, retval, params->lun_id); 2212 goto bailout_error; 2213 } 2214 2215 mtx_lock(&softc->lock); 2216 2217 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2218 2219 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2220 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2221 if (retval == EINTR) 2222 break; 2223 } 2224 2225 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2226 2227 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2228 snprintf(req->error_str, sizeof(req->error_str), 2229 "%s: interrupted waiting for LUN to be freed", 2230 __func__); 2231 mtx_unlock(&softc->lock); 2232 goto bailout_error; 2233 } 2234 2235 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2236 2237 softc->num_luns--; 2238 mtx_unlock(&softc->lock); 2239 2240 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2241 2242 taskqueue_free(be_lun->io_taskqueue); 2243 2244 ctl_be_block_close(be_lun); 2245 2246 if (be_lun->disk_stats != NULL) 2247 devstat_remove_entry(be_lun->disk_stats); 2248 2249 uma_zdestroy(be_lun->lun_zone); 2250 2251 ctl_free_opts(&be_lun->ctl_be_lun.options); 2252 free(be_lun->dev_path, M_CTLBLK); 2253 mtx_destroy(&be_lun->queue_lock); 2254 mtx_destroy(&be_lun->io_lock); 2255 free(be_lun, M_CTLBLK); 2256 2257 req->status = CTL_LUN_OK; 2258 2259 return (0); 2260 2261 bailout_error: 2262 2263 req->status = CTL_LUN_ERROR; 2264 2265 return (0); 2266 } 2267 2268 static int 2269 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2270 struct ctl_lun_req *req) 2271 { 2272 struct vattr vattr; 2273 int error; 2274 struct ctl_lun_modify_params *params; 2275 2276 params = &req->reqdata.modify; 2277 2278 if (params->lun_size_bytes != 0) { 2279 be_lun->size_bytes = params->lun_size_bytes; 2280 } else { 2281 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2282 if (error != 0) { 2283 snprintf(req->error_str, sizeof(req->error_str), 2284 "error calling VOP_GETATTR() for file %s", 2285 be_lun->dev_path); 2286 return (error); 2287 } 2288 2289 be_lun->size_bytes = vattr.va_size; 2290 } 2291 2292 return (0); 2293 } 2294 2295 static int 2296 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2297 struct ctl_lun_req *req) 2298 { 2299 struct cdev *dev; 2300 struct cdevsw *devsw; 2301 int error; 2302 struct ctl_lun_modify_params *params; 2303 uint64_t size_bytes; 2304 2305 params = &req->reqdata.modify; 2306 2307 dev = be_lun->vn->v_rdev; 2308 devsw = dev->si_devsw; 2309 if (!devsw->d_ioctl) { 2310 snprintf(req->error_str, sizeof(req->error_str), 2311 "%s: no d_ioctl for device %s!", __func__, 2312 be_lun->dev_path); 2313 return (ENODEV); 2314 } 2315 2316 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2317 (caddr_t)&size_bytes, FREAD, 2318 curthread); 2319 if (error) { 2320 snprintf(req->error_str, sizeof(req->error_str), 2321 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2322 "on %s!", __func__, error, be_lun->dev_path); 2323 return (error); 2324 } 2325 2326 if (params->lun_size_bytes != 0) { 2327 if (params->lun_size_bytes > size_bytes) { 2328 snprintf(req->error_str, sizeof(req->error_str), 2329 "%s: requested LUN size %ju > backing device " 2330 "size %ju", __func__, 2331 (uintmax_t)params->lun_size_bytes, 2332 (uintmax_t)size_bytes); 2333 return (EINVAL); 2334 } 2335 2336 be_lun->size_bytes = params->lun_size_bytes; 2337 } else { 2338 be_lun->size_bytes = size_bytes; 2339 } 2340 2341 return (0); 2342 } 2343 2344 static int 2345 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2346 { 2347 struct ctl_lun_modify_params *params; 2348 struct ctl_be_block_lun *be_lun; 2349 int error; 2350 2351 params = &req->reqdata.modify; 2352 2353 mtx_lock(&softc->lock); 2354 2355 be_lun = NULL; 2356 2357 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2358 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2359 break; 2360 } 2361 mtx_unlock(&softc->lock); 2362 2363 if (be_lun == NULL) { 2364 snprintf(req->error_str, sizeof(req->error_str), 2365 "%s: LUN %u is not managed by the block backend", 2366 __func__, params->lun_id); 2367 goto bailout_error; 2368 } 2369 2370 if (params->lun_size_bytes != 0) { 2371 if (params->lun_size_bytes < be_lun->blocksize) { 2372 snprintf(req->error_str, sizeof(req->error_str), 2373 "%s: LUN size %ju < blocksize %u", __func__, 2374 params->lun_size_bytes, be_lun->blocksize); 2375 goto bailout_error; 2376 } 2377 } 2378 2379 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2380 2381 if (be_lun->vn->v_type == VREG) 2382 error = ctl_be_block_modify_file(be_lun, req); 2383 else 2384 error = ctl_be_block_modify_dev(be_lun, req); 2385 2386 VOP_UNLOCK(be_lun->vn, 0); 2387 2388 if (error != 0) 2389 goto bailout_error; 2390 2391 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 2392 2393 /* 2394 * The maximum LBA is the size - 1. 2395 * 2396 * XXX: Note that this field is being updated without locking, 2397 * which might cause problems on 32-bit architectures. 2398 */ 2399 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2400 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2401 2402 /* Tell the user the exact size we ended up using */ 2403 params->lun_size_bytes = be_lun->size_bytes; 2404 2405 req->status = CTL_LUN_OK; 2406 2407 return (0); 2408 2409 bailout_error: 2410 req->status = CTL_LUN_ERROR; 2411 2412 return (0); 2413 } 2414 2415 static void 2416 ctl_be_block_lun_shutdown(void *be_lun) 2417 { 2418 struct ctl_be_block_lun *lun; 2419 struct ctl_be_block_softc *softc; 2420 2421 lun = (struct ctl_be_block_lun *)be_lun; 2422 2423 softc = lun->softc; 2424 2425 mtx_lock(&softc->lock); 2426 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2427 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2428 wakeup(lun); 2429 mtx_unlock(&softc->lock); 2430 2431 } 2432 2433 static void 2434 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2435 { 2436 struct ctl_be_block_lun *lun; 2437 struct ctl_be_block_softc *softc; 2438 2439 lun = (struct ctl_be_block_lun *)be_lun; 2440 softc = lun->softc; 2441 2442 if (status == CTL_LUN_CONFIG_OK) { 2443 mtx_lock(&softc->lock); 2444 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2445 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2446 wakeup(lun); 2447 mtx_unlock(&softc->lock); 2448 2449 /* 2450 * We successfully added the LUN, attempt to enable it. 2451 */ 2452 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2453 printf("%s: ctl_enable_lun() failed!\n", __func__); 2454 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2455 printf("%s: ctl_invalidate_lun() failed!\n", 2456 __func__); 2457 } 2458 } 2459 2460 return; 2461 } 2462 2463 2464 mtx_lock(&softc->lock); 2465 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2466 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2467 wakeup(lun); 2468 mtx_unlock(&softc->lock); 2469 } 2470 2471 2472 static int 2473 ctl_be_block_config_write(union ctl_io *io) 2474 { 2475 struct ctl_be_block_lun *be_lun; 2476 struct ctl_be_lun *ctl_be_lun; 2477 int retval; 2478 2479 retval = 0; 2480 2481 DPRINTF("entered\n"); 2482 2483 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2484 CTL_PRIV_BACKEND_LUN].ptr; 2485 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2486 2487 switch (io->scsiio.cdb[0]) { 2488 case SYNCHRONIZE_CACHE: 2489 case SYNCHRONIZE_CACHE_16: 2490 case WRITE_SAME_10: 2491 case WRITE_SAME_16: 2492 case UNMAP: 2493 /* 2494 * The upper level CTL code will filter out any CDBs with 2495 * the immediate bit set and return the proper error. 2496 * 2497 * We don't really need to worry about what LBA range the 2498 * user asked to be synced out. When they issue a sync 2499 * cache command, we'll sync out the whole thing. 2500 */ 2501 mtx_lock(&be_lun->queue_lock); 2502 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2503 links); 2504 mtx_unlock(&be_lun->queue_lock); 2505 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2506 break; 2507 case START_STOP_UNIT: { 2508 struct scsi_start_stop_unit *cdb; 2509 2510 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2511 2512 if (cdb->how & SSS_START) 2513 retval = ctl_start_lun(ctl_be_lun); 2514 else { 2515 retval = ctl_stop_lun(ctl_be_lun); 2516 /* 2517 * XXX KDM Copan-specific offline behavior. 2518 * Figure out a reasonable way to port this? 2519 */ 2520 #ifdef NEEDTOPORT 2521 if ((retval == 0) 2522 && (cdb->byte2 & SSS_ONOFFLINE)) 2523 retval = ctl_lun_offline(ctl_be_lun); 2524 #endif 2525 } 2526 2527 /* 2528 * In general, the above routines should not fail. They 2529 * just set state for the LUN. So we've got something 2530 * pretty wrong here if we can't start or stop the LUN. 2531 */ 2532 if (retval != 0) { 2533 ctl_set_internal_failure(&io->scsiio, 2534 /*sks_valid*/ 1, 2535 /*retry_count*/ 0xf051); 2536 retval = CTL_RETVAL_COMPLETE; 2537 } else { 2538 ctl_set_success(&io->scsiio); 2539 } 2540 ctl_config_write_done(io); 2541 break; 2542 } 2543 default: 2544 ctl_set_invalid_opcode(&io->scsiio); 2545 ctl_config_write_done(io); 2546 retval = CTL_RETVAL_COMPLETE; 2547 break; 2548 } 2549 2550 return (retval); 2551 2552 } 2553 2554 static int 2555 ctl_be_block_config_read(union ctl_io *io) 2556 { 2557 return (0); 2558 } 2559 2560 static int 2561 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2562 { 2563 struct ctl_be_block_lun *lun; 2564 int retval; 2565 2566 lun = (struct ctl_be_block_lun *)be_lun; 2567 retval = 0; 2568 2569 retval = sbuf_printf(sb, "\t<num_threads>"); 2570 2571 if (retval != 0) 2572 goto bailout; 2573 2574 retval = sbuf_printf(sb, "%d", lun->num_threads); 2575 2576 if (retval != 0) 2577 goto bailout; 2578 2579 retval = sbuf_printf(sb, "</num_threads>\n"); 2580 2581 bailout: 2582 2583 return (retval); 2584 } 2585 2586 int 2587 ctl_be_block_init(void) 2588 { 2589 struct ctl_be_block_softc *softc; 2590 int retval; 2591 2592 softc = &backend_block_softc; 2593 retval = 0; 2594 2595 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2596 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2597 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2598 STAILQ_INIT(&softc->disk_list); 2599 STAILQ_INIT(&softc->lun_list); 2600 2601 return (retval); 2602 } 2603