1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37 /* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/types.h> 49 #include <sys/kthread.h> 50 #include <sys/bio.h> 51 #include <sys/fcntl.h> 52 #include <sys/limits.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/condvar.h> 56 #include <sys/malloc.h> 57 #include <sys/conf.h> 58 #include <sys/ioccom.h> 59 #include <sys/queue.h> 60 #include <sys/sbuf.h> 61 #include <sys/endian.h> 62 #include <sys/uio.h> 63 #include <sys/buf.h> 64 #include <sys/taskqueue.h> 65 #include <sys/vnode.h> 66 #include <sys/namei.h> 67 #include <sys/mount.h> 68 #include <sys/disk.h> 69 #include <sys/fcntl.h> 70 #include <sys/filedesc.h> 71 #include <sys/proc.h> 72 #include <sys/pcpu.h> 73 #include <sys/module.h> 74 #include <sys/sdt.h> 75 #include <sys/devicestat.h> 76 #include <sys/sysctl.h> 77 78 #include <geom/geom.h> 79 80 #include <cam/cam.h> 81 #include <cam/scsi/scsi_all.h> 82 #include <cam/scsi/scsi_da.h> 83 #include <cam/ctl/ctl_io.h> 84 #include <cam/ctl/ctl.h> 85 #include <cam/ctl/ctl_backend.h> 86 #include <cam/ctl/ctl_frontend_internal.h> 87 #include <cam/ctl/ctl_ioctl.h> 88 #include <cam/ctl/ctl_scsi_all.h> 89 #include <cam/ctl/ctl_error.h> 90 91 /* 92 * The idea here is that we'll allocate enough S/G space to hold a 16MB 93 * I/O. If we get an I/O larger than that, we'll reject it. 94 */ 95 #define CTLBLK_MAX_IO_SIZE (16 * 1024 * 1024) 96 #define CTLBLK_MAX_SEGS (CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1 97 98 #ifdef CTLBLK_DEBUG 99 #define DPRINTF(fmt, args...) \ 100 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 101 #else 102 #define DPRINTF(fmt, args...) do {} while(0) 103 #endif 104 105 SDT_PROVIDER_DEFINE(cbb); 106 107 typedef enum { 108 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 109 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 110 CTL_BE_BLOCK_LUN_WAITING = 0x04, 111 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 112 } ctl_be_block_lun_flags; 113 114 typedef enum { 115 CTL_BE_BLOCK_NONE, 116 CTL_BE_BLOCK_DEV, 117 CTL_BE_BLOCK_FILE 118 } ctl_be_block_type; 119 120 struct ctl_be_block_devdata { 121 struct cdev *cdev; 122 struct cdevsw *csw; 123 int dev_ref; 124 }; 125 126 struct ctl_be_block_filedata { 127 struct ucred *cred; 128 }; 129 130 union ctl_be_block_bedata { 131 struct ctl_be_block_devdata dev; 132 struct ctl_be_block_filedata file; 133 }; 134 135 struct ctl_be_block_io; 136 struct ctl_be_block_lun; 137 138 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 139 struct ctl_be_block_io *beio); 140 141 /* 142 * Backend LUN structure. There is a 1:1 mapping between a block device 143 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 144 */ 145 struct ctl_be_block_lun { 146 struct ctl_block_disk *disk; 147 char lunname[32]; 148 char *dev_path; 149 ctl_be_block_type dev_type; 150 struct vnode *vn; 151 union ctl_be_block_bedata backend; 152 cbb_dispatch_t dispatch; 153 cbb_dispatch_t lun_flush; 154 cbb_dispatch_t unmap; 155 struct mtx lock; 156 uma_zone_t lun_zone; 157 uint64_t size_blocks; 158 uint64_t size_bytes; 159 uint32_t blocksize; 160 int blocksize_shift; 161 uint16_t pblockexp; 162 uint16_t pblockoff; 163 struct ctl_be_block_softc *softc; 164 struct devstat *disk_stats; 165 ctl_be_block_lun_flags flags; 166 STAILQ_ENTRY(ctl_be_block_lun) links; 167 struct ctl_be_lun ctl_be_lun; 168 struct taskqueue *io_taskqueue; 169 struct task io_task; 170 int num_threads; 171 STAILQ_HEAD(, ctl_io_hdr) input_queue; 172 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 173 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 174 }; 175 176 /* 177 * Overall softc structure for the block backend module. 178 */ 179 struct ctl_be_block_softc { 180 struct mtx lock; 181 int num_disks; 182 STAILQ_HEAD(, ctl_block_disk) disk_list; 183 int num_luns; 184 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 185 }; 186 187 static struct ctl_be_block_softc backend_block_softc; 188 189 /* 190 * Per-I/O information. 191 */ 192 struct ctl_be_block_io { 193 union ctl_io *io; 194 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 195 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 196 int bio_cmd; 197 int bio_flags; 198 int num_segs; 199 int num_bios_sent; 200 int num_bios_done; 201 int send_complete; 202 int num_errors; 203 struct bintime ds_t0; 204 devstat_tag_type ds_tag_type; 205 devstat_trans_flags ds_trans_type; 206 uint64_t io_len; 207 uint64_t io_offset; 208 struct ctl_be_block_softc *softc; 209 struct ctl_be_block_lun *lun; 210 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 211 }; 212 213 static int cbb_num_threads = 14; 214 TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads); 215 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 216 "CAM Target Layer Block Backend"); 217 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW, 218 &cbb_num_threads, 0, "Number of threads per backing file"); 219 220 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 221 static void ctl_free_beio(struct ctl_be_block_io *beio); 222 static void ctl_complete_beio(struct ctl_be_block_io *beio); 223 static int ctl_be_block_move_done(union ctl_io *io); 224 static void ctl_be_block_biodone(struct bio *bio); 225 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 226 struct ctl_be_block_io *beio); 227 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 228 struct ctl_be_block_io *beio); 229 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 230 struct ctl_be_block_io *beio); 231 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 232 struct ctl_be_block_io *beio); 233 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 234 struct ctl_be_block_io *beio); 235 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 236 union ctl_io *io); 237 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 238 union ctl_io *io); 239 static void ctl_be_block_worker(void *context, int pending); 240 static int ctl_be_block_submit(union ctl_io *io); 241 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 242 int flag, struct thread *td); 243 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 244 struct ctl_lun_req *req); 245 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 246 struct ctl_lun_req *req); 247 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 248 static int ctl_be_block_open(struct ctl_be_block_softc *softc, 249 struct ctl_be_block_lun *be_lun, 250 struct ctl_lun_req *req); 251 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 252 struct ctl_lun_req *req); 253 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 254 struct ctl_lun_req *req); 255 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 256 struct ctl_lun_req *req); 257 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 258 struct ctl_lun_req *req); 259 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 260 struct ctl_lun_req *req); 261 static void ctl_be_block_lun_shutdown(void *be_lun); 262 static void ctl_be_block_lun_config_status(void *be_lun, 263 ctl_lun_config_status status); 264 static int ctl_be_block_config_write(union ctl_io *io); 265 static int ctl_be_block_config_read(union ctl_io *io); 266 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 267 int ctl_be_block_init(void); 268 269 static struct ctl_backend_driver ctl_be_block_driver = 270 { 271 .name = "block", 272 .flags = CTL_BE_FLAG_HAS_CONFIG, 273 .init = ctl_be_block_init, 274 .data_submit = ctl_be_block_submit, 275 .data_move_done = ctl_be_block_move_done, 276 .config_read = ctl_be_block_config_read, 277 .config_write = ctl_be_block_config_write, 278 .ioctl = ctl_be_block_ioctl, 279 .lun_info = ctl_be_block_lun_info 280 }; 281 282 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 283 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 284 285 static uma_zone_t beio_zone; 286 287 static struct ctl_be_block_io * 288 ctl_alloc_beio(struct ctl_be_block_softc *softc) 289 { 290 struct ctl_be_block_io *beio; 291 292 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 293 beio->softc = softc; 294 return (beio); 295 } 296 297 static void 298 ctl_free_beio(struct ctl_be_block_io *beio) 299 { 300 int duplicate_free; 301 int i; 302 303 duplicate_free = 0; 304 305 for (i = 0; i < beio->num_segs; i++) { 306 if (beio->sg_segs[i].addr == NULL) 307 duplicate_free++; 308 309 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 310 beio->sg_segs[i].addr = NULL; 311 } 312 313 if (duplicate_free > 0) { 314 printf("%s: %d duplicate frees out of %d segments\n", __func__, 315 duplicate_free, beio->num_segs); 316 } 317 318 uma_zfree(beio_zone, beio); 319 } 320 321 static void 322 ctl_complete_beio(struct ctl_be_block_io *beio) 323 { 324 union ctl_io *io; 325 int io_len; 326 327 io = beio->io; 328 329 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) 330 io_len = beio->io_len; 331 else 332 io_len = 0; 333 334 devstat_end_transaction(beio->lun->disk_stats, 335 /*bytes*/ io_len, 336 beio->ds_tag_type, 337 beio->ds_trans_type, 338 /*now*/ NULL, 339 /*then*/&beio->ds_t0); 340 341 if (beio->beio_cont != NULL) { 342 beio->beio_cont(beio); 343 } else { 344 ctl_free_beio(beio); 345 ctl_done(io); 346 } 347 } 348 349 static int 350 ctl_be_block_move_done(union ctl_io *io) 351 { 352 struct ctl_be_block_io *beio; 353 struct ctl_be_block_lun *be_lun; 354 #ifdef CTL_TIME_IO 355 struct bintime cur_bt; 356 #endif 357 358 beio = (struct ctl_be_block_io *) 359 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 360 361 be_lun = beio->lun; 362 363 DPRINTF("entered\n"); 364 365 #ifdef CTL_TIME_IO 366 getbintime(&cur_bt); 367 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 368 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 369 io->io_hdr.num_dmas++; 370 #endif 371 372 /* 373 * We set status at this point for read commands, and write 374 * commands with errors. 375 */ 376 if ((beio->bio_cmd == BIO_READ) 377 && (io->io_hdr.port_status == 0) 378 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 379 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 380 ctl_set_success(&io->scsiio); 381 else if ((io->io_hdr.port_status != 0) 382 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 383 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 384 /* 385 * For hardware error sense keys, the sense key 386 * specific value is defined to be a retry count, 387 * but we use it to pass back an internal FETD 388 * error code. XXX KDM Hopefully the FETD is only 389 * using 16 bits for an error code, since that's 390 * all the space we have in the sks field. 391 */ 392 ctl_set_internal_failure(&io->scsiio, 393 /*sks_valid*/ 1, 394 /*retry_count*/ 395 io->io_hdr.port_status); 396 } 397 398 /* 399 * If this is a read, or a write with errors, it is done. 400 */ 401 if ((beio->bio_cmd == BIO_READ) 402 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 403 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 404 ctl_complete_beio(beio); 405 return (0); 406 } 407 408 /* 409 * At this point, we have a write and the DMA completed 410 * successfully. We now have to queue it to the task queue to 411 * execute the backend I/O. That is because we do blocking 412 * memory allocations, and in the file backing case, blocking I/O. 413 * This move done routine is generally called in the SIM's 414 * interrupt context, and therefore we cannot block. 415 */ 416 mtx_lock(&be_lun->lock); 417 /* 418 * XXX KDM make sure that links is okay to use at this point. 419 * Otherwise, we either need to add another field to ctl_io_hdr, 420 * or deal with resource allocation here. 421 */ 422 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 423 mtx_unlock(&be_lun->lock); 424 425 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 426 427 return (0); 428 } 429 430 static void 431 ctl_be_block_biodone(struct bio *bio) 432 { 433 struct ctl_be_block_io *beio; 434 struct ctl_be_block_lun *be_lun; 435 union ctl_io *io; 436 int error; 437 438 beio = bio->bio_caller1; 439 be_lun = beio->lun; 440 io = beio->io; 441 442 DPRINTF("entered\n"); 443 444 error = bio->bio_error; 445 mtx_lock(&be_lun->lock); 446 if (error != 0) 447 beio->num_errors++; 448 449 beio->num_bios_done++; 450 451 /* 452 * XXX KDM will this cause WITNESS to complain? Holding a lock 453 * during the free might cause it to complain. 454 */ 455 g_destroy_bio(bio); 456 457 /* 458 * If the send complete bit isn't set, or we aren't the last I/O to 459 * complete, then we're done. 460 */ 461 if ((beio->send_complete == 0) 462 || (beio->num_bios_done < beio->num_bios_sent)) { 463 mtx_unlock(&be_lun->lock); 464 return; 465 } 466 467 /* 468 * At this point, we've verified that we are the last I/O to 469 * complete, so it's safe to drop the lock. 470 */ 471 mtx_unlock(&be_lun->lock); 472 473 /* 474 * If there are any errors from the backing device, we fail the 475 * entire I/O with a medium error. 476 */ 477 if (beio->num_errors > 0) { 478 if (error == EOPNOTSUPP) { 479 ctl_set_invalid_opcode(&io->scsiio); 480 } else if (beio->bio_cmd == BIO_FLUSH) { 481 /* XXX KDM is there is a better error here? */ 482 ctl_set_internal_failure(&io->scsiio, 483 /*sks_valid*/ 1, 484 /*retry_count*/ 0xbad2); 485 } else 486 ctl_set_medium_error(&io->scsiio); 487 ctl_complete_beio(beio); 488 return; 489 } 490 491 /* 492 * If this is a write, a flush or a delete, we're all done. 493 * If this is a read, we can now send the data to the user. 494 */ 495 if ((beio->bio_cmd == BIO_WRITE) 496 || (beio->bio_cmd == BIO_FLUSH) 497 || (beio->bio_cmd == BIO_DELETE)) { 498 ctl_set_success(&io->scsiio); 499 ctl_complete_beio(beio); 500 } else { 501 io->scsiio.be_move_done = ctl_be_block_move_done; 502 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 503 io->scsiio.kern_data_len = beio->io_len; 504 io->scsiio.kern_total_len = beio->io_len; 505 io->scsiio.kern_rel_offset = 0; 506 io->scsiio.kern_data_resid = 0; 507 io->scsiio.kern_sg_entries = beio->num_segs; 508 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 509 #ifdef CTL_TIME_IO 510 getbintime(&io->io_hdr.dma_start_bt); 511 #endif 512 ctl_datamove(io); 513 } 514 } 515 516 static void 517 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 518 struct ctl_be_block_io *beio) 519 { 520 union ctl_io *io; 521 struct mount *mountpoint; 522 int error, lock_flags; 523 524 DPRINTF("entered\n"); 525 526 io = beio->io; 527 528 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 529 530 if (MNT_SHARED_WRITES(mountpoint) 531 || ((mountpoint == NULL) 532 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 533 lock_flags = LK_SHARED; 534 else 535 lock_flags = LK_EXCLUSIVE; 536 537 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 538 539 binuptime(&beio->ds_t0); 540 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 541 542 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 543 VOP_UNLOCK(be_lun->vn, 0); 544 545 vn_finished_write(mountpoint); 546 547 if (error == 0) 548 ctl_set_success(&io->scsiio); 549 else { 550 /* XXX KDM is there is a better error here? */ 551 ctl_set_internal_failure(&io->scsiio, 552 /*sks_valid*/ 1, 553 /*retry_count*/ 0xbad1); 554 } 555 556 ctl_complete_beio(beio); 557 } 558 559 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 560 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 561 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 562 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 563 564 static void 565 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 566 struct ctl_be_block_io *beio) 567 { 568 struct ctl_be_block_filedata *file_data; 569 union ctl_io *io; 570 struct uio xuio; 571 struct iovec *xiovec; 572 int flags; 573 int error, i; 574 575 DPRINTF("entered\n"); 576 577 file_data = &be_lun->backend.file; 578 io = beio->io; 579 flags = beio->bio_flags; 580 581 if (beio->bio_cmd == BIO_READ) { 582 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 583 } else { 584 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 585 } 586 587 bzero(&xuio, sizeof(xuio)); 588 if (beio->bio_cmd == BIO_READ) 589 xuio.uio_rw = UIO_READ; 590 else 591 xuio.uio_rw = UIO_WRITE; 592 593 xuio.uio_offset = beio->io_offset; 594 xuio.uio_resid = beio->io_len; 595 xuio.uio_segflg = UIO_SYSSPACE; 596 xuio.uio_iov = beio->xiovecs; 597 xuio.uio_iovcnt = beio->num_segs; 598 xuio.uio_td = curthread; 599 600 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 601 xiovec->iov_base = beio->sg_segs[i].addr; 602 xiovec->iov_len = beio->sg_segs[i].len; 603 } 604 605 if (beio->bio_cmd == BIO_READ) { 606 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 607 608 binuptime(&beio->ds_t0); 609 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 610 611 /* 612 * UFS pays attention to IO_DIRECT for reads. If the 613 * DIRECTIO option is configured into the kernel, it calls 614 * ffs_rawread(). But that only works for single-segment 615 * uios with user space addresses. In our case, with a 616 * kernel uio, it still reads into the buffer cache, but it 617 * will just try to release the buffer from the cache later 618 * on in ffs_read(). 619 * 620 * ZFS does not pay attention to IO_DIRECT for reads. 621 * 622 * UFS does not pay attention to IO_SYNC for reads. 623 * 624 * ZFS pays attention to IO_SYNC (which translates into the 625 * Solaris define FRSYNC for zfs_read()) for reads. It 626 * attempts to sync the file before reading. 627 * 628 * So, to attempt to provide some barrier semantics in the 629 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 630 */ 631 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 632 (IO_DIRECT|IO_SYNC) : 0, file_data->cred); 633 634 VOP_UNLOCK(be_lun->vn, 0); 635 } else { 636 struct mount *mountpoint; 637 int lock_flags; 638 639 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 640 641 if (MNT_SHARED_WRITES(mountpoint) 642 || ((mountpoint == NULL) 643 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 644 lock_flags = LK_SHARED; 645 else 646 lock_flags = LK_EXCLUSIVE; 647 648 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 649 650 binuptime(&beio->ds_t0); 651 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 652 653 /* 654 * UFS pays attention to IO_DIRECT for writes. The write 655 * is done asynchronously. (Normally the write would just 656 * get put into cache. 657 * 658 * UFS pays attention to IO_SYNC for writes. It will 659 * attempt to write the buffer out synchronously if that 660 * flag is set. 661 * 662 * ZFS does not pay attention to IO_DIRECT for writes. 663 * 664 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 665 * for writes. It will flush the transaction from the 666 * cache before returning. 667 * 668 * So if we've got the BIO_ORDERED flag set, we want 669 * IO_SYNC in either the UFS or ZFS case. 670 */ 671 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ? 672 IO_SYNC : 0, file_data->cred); 673 VOP_UNLOCK(be_lun->vn, 0); 674 675 vn_finished_write(mountpoint); 676 } 677 678 /* 679 * If we got an error, set the sense data to "MEDIUM ERROR" and 680 * return the I/O to the user. 681 */ 682 if (error != 0) { 683 char path_str[32]; 684 685 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 686 /* 687 * XXX KDM ZFS returns ENOSPC when the underlying 688 * filesystem fills up. What kind of SCSI error should we 689 * return for that? 690 */ 691 printf("%s%s command returned errno %d\n", path_str, 692 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 693 ctl_set_medium_error(&io->scsiio); 694 ctl_complete_beio(beio); 695 return; 696 } 697 698 /* 699 * If this is a write, we're all done. 700 * If this is a read, we can now send the data to the user. 701 */ 702 if (beio->bio_cmd == BIO_WRITE) { 703 ctl_set_success(&io->scsiio); 704 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 705 ctl_complete_beio(beio); 706 } else { 707 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 708 io->scsiio.be_move_done = ctl_be_block_move_done; 709 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 710 io->scsiio.kern_data_len = beio->io_len; 711 io->scsiio.kern_total_len = beio->io_len; 712 io->scsiio.kern_rel_offset = 0; 713 io->scsiio.kern_data_resid = 0; 714 io->scsiio.kern_sg_entries = beio->num_segs; 715 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 716 #ifdef CTL_TIME_IO 717 getbintime(&io->io_hdr.dma_start_bt); 718 #endif 719 ctl_datamove(io); 720 } 721 } 722 723 static void 724 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 725 struct ctl_be_block_io *beio) 726 { 727 struct bio *bio; 728 union ctl_io *io; 729 struct ctl_be_block_devdata *dev_data; 730 731 dev_data = &be_lun->backend.dev; 732 io = beio->io; 733 734 DPRINTF("entered\n"); 735 736 /* This can't fail, it's a blocking allocation. */ 737 bio = g_alloc_bio(); 738 739 bio->bio_cmd = BIO_FLUSH; 740 bio->bio_flags |= BIO_ORDERED; 741 bio->bio_dev = dev_data->cdev; 742 bio->bio_offset = 0; 743 bio->bio_data = 0; 744 bio->bio_done = ctl_be_block_biodone; 745 bio->bio_caller1 = beio; 746 bio->bio_pblkno = 0; 747 748 /* 749 * We don't need to acquire the LUN lock here, because we are only 750 * sending one bio, and so there is no other context to synchronize 751 * with. 752 */ 753 beio->num_bios_sent = 1; 754 beio->send_complete = 1; 755 756 binuptime(&beio->ds_t0); 757 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 758 759 (*dev_data->csw->d_strategy)(bio); 760 } 761 762 static void 763 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 764 struct ctl_be_block_io *beio, 765 uint64_t off, uint64_t len, int last) 766 { 767 struct bio *bio; 768 struct ctl_be_block_devdata *dev_data; 769 uint64_t maxlen; 770 771 dev_data = &be_lun->backend.dev; 772 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 773 while (len > 0) { 774 bio = g_alloc_bio(); 775 bio->bio_cmd = BIO_DELETE; 776 bio->bio_flags |= beio->bio_flags; 777 bio->bio_dev = dev_data->cdev; 778 bio->bio_offset = off; 779 bio->bio_length = MIN(len, maxlen); 780 bio->bio_data = 0; 781 bio->bio_done = ctl_be_block_biodone; 782 bio->bio_caller1 = beio; 783 bio->bio_pblkno = off / be_lun->blocksize; 784 785 off += bio->bio_length; 786 len -= bio->bio_length; 787 788 mtx_lock(&be_lun->lock); 789 beio->num_bios_sent++; 790 if (last && len == 0) 791 beio->send_complete = 1; 792 mtx_unlock(&be_lun->lock); 793 794 (*dev_data->csw->d_strategy)(bio); 795 } 796 } 797 798 static void 799 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 800 struct ctl_be_block_io *beio) 801 { 802 union ctl_io *io; 803 struct ctl_be_block_devdata *dev_data; 804 struct ctl_ptr_len_flags ptrlen; 805 struct scsi_unmap_desc *buf, *end; 806 uint64_t len; 807 808 dev_data = &be_lun->backend.dev; 809 io = beio->io; 810 811 DPRINTF("entered\n"); 812 813 binuptime(&beio->ds_t0); 814 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 815 816 if (beio->io_offset == -1) { 817 beio->io_len = 0; 818 memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 819 sizeof(ptrlen)); 820 buf = (struct scsi_unmap_desc *)ptrlen.ptr; 821 end = buf + ptrlen.len / sizeof(*buf); 822 for (; buf < end; buf++) { 823 len = (uint64_t)scsi_4btoul(buf->length) * 824 be_lun->blocksize; 825 beio->io_len += len; 826 ctl_be_block_unmap_dev_range(be_lun, beio, 827 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 828 (end - buf < 2) ? TRUE : FALSE); 829 } 830 } else 831 ctl_be_block_unmap_dev_range(be_lun, beio, 832 beio->io_offset, beio->io_len, TRUE); 833 } 834 835 static void 836 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 837 struct ctl_be_block_io *beio) 838 { 839 int i; 840 struct bio *bio; 841 struct ctl_be_block_devdata *dev_data; 842 off_t cur_offset; 843 int max_iosize; 844 845 DPRINTF("entered\n"); 846 847 dev_data = &be_lun->backend.dev; 848 849 /* 850 * We have to limit our I/O size to the maximum supported by the 851 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 852 * set it properly, use DFLTPHYS. 853 */ 854 max_iosize = dev_data->cdev->si_iosize_max; 855 if (max_iosize < PAGE_SIZE) 856 max_iosize = DFLTPHYS; 857 858 cur_offset = beio->io_offset; 859 860 /* 861 * XXX KDM need to accurately reflect the number of I/Os outstanding 862 * to a device. 863 */ 864 binuptime(&beio->ds_t0); 865 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 866 867 for (i = 0; i < beio->num_segs; i++) { 868 size_t cur_size; 869 uint8_t *cur_ptr; 870 871 cur_size = beio->sg_segs[i].len; 872 cur_ptr = beio->sg_segs[i].addr; 873 874 while (cur_size > 0) { 875 /* This can't fail, it's a blocking allocation. */ 876 bio = g_alloc_bio(); 877 878 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 879 880 bio->bio_cmd = beio->bio_cmd; 881 bio->bio_flags |= beio->bio_flags; 882 bio->bio_dev = dev_data->cdev; 883 bio->bio_caller1 = beio; 884 bio->bio_length = min(cur_size, max_iosize); 885 bio->bio_offset = cur_offset; 886 bio->bio_data = cur_ptr; 887 bio->bio_done = ctl_be_block_biodone; 888 bio->bio_pblkno = cur_offset / be_lun->blocksize; 889 890 cur_offset += bio->bio_length; 891 cur_ptr += bio->bio_length; 892 cur_size -= bio->bio_length; 893 894 /* 895 * Make sure we set the complete bit just before we 896 * issue the last bio so we don't wind up with a 897 * race. 898 * 899 * Use the LUN mutex here instead of a combination 900 * of atomic variables for simplicity. 901 * 902 * XXX KDM we could have a per-IO lock, but that 903 * would cause additional per-IO setup and teardown 904 * overhead. Hopefully there won't be too much 905 * contention on the LUN lock. 906 */ 907 mtx_lock(&be_lun->lock); 908 909 beio->num_bios_sent++; 910 911 if ((i == beio->num_segs - 1) 912 && (cur_size == 0)) 913 beio->send_complete = 1; 914 915 mtx_unlock(&be_lun->lock); 916 917 (*dev_data->csw->d_strategy)(bio); 918 } 919 } 920 } 921 922 static void 923 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 924 { 925 union ctl_io *io; 926 927 io = beio->io; 928 ctl_free_beio(beio); 929 if (((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) 930 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 931 ctl_config_write_done(io); 932 return; 933 } 934 935 ctl_be_block_config_write(io); 936 } 937 938 static void 939 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 940 union ctl_io *io) 941 { 942 struct ctl_be_block_io *beio; 943 struct ctl_be_block_softc *softc; 944 struct ctl_lba_len_flags lbalen; 945 uint64_t len_left, lba; 946 int i, seglen; 947 uint8_t *buf, *end; 948 949 DPRINTF("entered\n"); 950 951 beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 952 softc = be_lun->softc; 953 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 954 sizeof(lbalen)); 955 956 if (lbalen.flags & ~(SWS_LBDATA | SWS_UNMAP) || 957 (lbalen.flags & SWS_UNMAP && be_lun->unmap == NULL)) { 958 ctl_free_beio(beio); 959 ctl_set_invalid_field(&io->scsiio, 960 /*sks_valid*/ 1, 961 /*command*/ 1, 962 /*field*/ 1, 963 /*bit_valid*/ 0, 964 /*bit*/ 0); 965 ctl_config_write_done(io); 966 return; 967 } 968 969 /* 970 * If the I/O came down with an ordered or head of queue tag, set 971 * the BIO_ORDERED attribute. For head of queue tags, that's 972 * pretty much the best we can do. 973 */ 974 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 975 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 976 beio->bio_flags = BIO_ORDERED; 977 978 switch (io->scsiio.tag_type) { 979 case CTL_TAG_ORDERED: 980 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 981 break; 982 case CTL_TAG_HEAD_OF_QUEUE: 983 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 984 break; 985 case CTL_TAG_UNTAGGED: 986 case CTL_TAG_SIMPLE: 987 case CTL_TAG_ACA: 988 default: 989 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 990 break; 991 } 992 993 if (lbalen.flags & SWS_UNMAP) { 994 beio->io_offset = lbalen.lba * be_lun->blocksize; 995 beio->io_len = (uint64_t)lbalen.len * be_lun->blocksize; 996 beio->bio_cmd = BIO_DELETE; 997 beio->ds_trans_type = DEVSTAT_FREE; 998 999 be_lun->unmap(be_lun, beio); 1000 return; 1001 } 1002 1003 beio->bio_cmd = BIO_WRITE; 1004 beio->ds_trans_type = DEVSTAT_WRITE; 1005 1006 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1007 (uintmax_t)lbalen.lba, lbalen.len); 1008 1009 len_left = (uint64_t)lbalen.len * be_lun->blocksize; 1010 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1011 1012 /* 1013 * Setup the S/G entry for this chunk. 1014 */ 1015 seglen = MIN(MAXPHYS, len_left); 1016 seglen -= seglen % be_lun->blocksize; 1017 beio->sg_segs[i].len = seglen; 1018 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1019 1020 DPRINTF("segment %d addr %p len %zd\n", i, 1021 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1022 1023 beio->num_segs++; 1024 len_left -= seglen; 1025 1026 buf = beio->sg_segs[i].addr; 1027 end = buf + seglen; 1028 for (; buf < end; buf += be_lun->blocksize) { 1029 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1030 if (lbalen.flags & SWS_LBDATA) 1031 scsi_ulto4b(lbalen.lba + lba, buf); 1032 lba++; 1033 } 1034 } 1035 1036 beio->io_offset = lbalen.lba * be_lun->blocksize; 1037 beio->io_len = lba * be_lun->blocksize; 1038 1039 /* We can not do all in one run. Correct and schedule rerun. */ 1040 if (len_left > 0) { 1041 lbalen.lba += lba; 1042 lbalen.len -= lba; 1043 memcpy(io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, &lbalen, 1044 sizeof(lbalen)); 1045 beio->beio_cont = ctl_be_block_cw_done_ws; 1046 } 1047 1048 be_lun->dispatch(be_lun, beio); 1049 } 1050 1051 static void 1052 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1053 union ctl_io *io) 1054 { 1055 struct ctl_be_block_io *beio; 1056 struct ctl_be_block_softc *softc; 1057 struct ctl_ptr_len_flags ptrlen; 1058 1059 DPRINTF("entered\n"); 1060 1061 beio = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 1062 softc = be_lun->softc; 1063 memcpy(&ptrlen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 1064 sizeof(ptrlen)); 1065 1066 if (ptrlen.flags != 0 || be_lun->unmap == NULL) { 1067 ctl_free_beio(beio); 1068 ctl_set_invalid_field(&io->scsiio, 1069 /*sks_valid*/ 0, 1070 /*command*/ 1, 1071 /*field*/ 0, 1072 /*bit_valid*/ 0, 1073 /*bit*/ 0); 1074 ctl_config_write_done(io); 1075 return; 1076 } 1077 1078 /* 1079 * If the I/O came down with an ordered or head of queue tag, set 1080 * the BIO_ORDERED attribute. For head of queue tags, that's 1081 * pretty much the best we can do. 1082 */ 1083 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1084 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1085 beio->bio_flags = BIO_ORDERED; 1086 1087 switch (io->scsiio.tag_type) { 1088 case CTL_TAG_ORDERED: 1089 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1090 break; 1091 case CTL_TAG_HEAD_OF_QUEUE: 1092 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1093 break; 1094 case CTL_TAG_UNTAGGED: 1095 case CTL_TAG_SIMPLE: 1096 case CTL_TAG_ACA: 1097 default: 1098 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1099 break; 1100 } 1101 1102 beio->io_len = 0; 1103 beio->io_offset = -1; 1104 1105 beio->bio_cmd = BIO_DELETE; 1106 beio->ds_trans_type = DEVSTAT_FREE; 1107 1108 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1109 (uintmax_t)lbalen.lba, lbalen.len); 1110 1111 be_lun->unmap(be_lun, beio); 1112 } 1113 1114 static void 1115 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1116 { 1117 union ctl_io *io; 1118 1119 io = beio->io; 1120 ctl_free_beio(beio); 1121 ctl_config_write_done(io); 1122 } 1123 1124 static void 1125 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1126 union ctl_io *io) 1127 { 1128 struct ctl_be_block_io *beio; 1129 struct ctl_be_block_softc *softc; 1130 1131 DPRINTF("entered\n"); 1132 1133 softc = be_lun->softc; 1134 beio = ctl_alloc_beio(softc); 1135 beio->io = io; 1136 beio->lun = be_lun; 1137 beio->beio_cont = ctl_be_block_cw_done; 1138 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio; 1139 1140 switch (io->scsiio.cdb[0]) { 1141 case SYNCHRONIZE_CACHE: 1142 case SYNCHRONIZE_CACHE_16: 1143 beio->bio_cmd = BIO_FLUSH; 1144 beio->ds_trans_type = DEVSTAT_NO_DATA; 1145 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1146 beio->io_len = 0; 1147 be_lun->lun_flush(be_lun, beio); 1148 break; 1149 case WRITE_SAME_10: 1150 case WRITE_SAME_16: 1151 ctl_be_block_cw_dispatch_ws(be_lun, io); 1152 break; 1153 case UNMAP: 1154 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1155 break; 1156 default: 1157 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1158 break; 1159 } 1160 } 1161 1162 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1163 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1164 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1165 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1166 1167 static void 1168 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1169 union ctl_io *io) 1170 { 1171 struct ctl_be_block_io *beio; 1172 struct ctl_be_block_softc *softc; 1173 struct ctl_lba_len lbalen; 1174 uint64_t len_left, io_size_bytes; 1175 int i; 1176 1177 softc = be_lun->softc; 1178 1179 DPRINTF("entered\n"); 1180 1181 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { 1182 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1183 } else { 1184 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1185 } 1186 1187 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 1188 sizeof(lbalen)); 1189 1190 io_size_bytes = lbalen.len * be_lun->blocksize; 1191 1192 /* 1193 * XXX KDM this is temporary, until we implement chaining of beio 1194 * structures and multiple datamove calls to move all the data in 1195 * or out. 1196 */ 1197 if (io_size_bytes > CTLBLK_MAX_IO_SIZE) { 1198 printf("%s: IO length %ju > max io size %u\n", __func__, 1199 io_size_bytes, CTLBLK_MAX_IO_SIZE); 1200 ctl_set_invalid_field(&io->scsiio, 1201 /*sks_valid*/ 0, 1202 /*command*/ 1, 1203 /*field*/ 0, 1204 /*bit_valid*/ 0, 1205 /*bit*/ 0); 1206 ctl_done(io); 1207 return; 1208 } 1209 1210 beio = ctl_alloc_beio(softc); 1211 beio->io = io; 1212 beio->lun = be_lun; 1213 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio; 1214 1215 /* 1216 * If the I/O came down with an ordered or head of queue tag, set 1217 * the BIO_ORDERED attribute. For head of queue tags, that's 1218 * pretty much the best we can do. 1219 * 1220 * XXX KDM we don't have a great way to easily know about the FUA 1221 * bit right now (it is decoded in ctl_read_write(), but we don't 1222 * pass that knowledge to the backend), and in any case we would 1223 * need to determine how to handle it. 1224 */ 1225 if ((io->scsiio.tag_type == CTL_TAG_ORDERED) 1226 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)) 1227 beio->bio_flags = BIO_ORDERED; 1228 1229 switch (io->scsiio.tag_type) { 1230 case CTL_TAG_ORDERED: 1231 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1232 break; 1233 case CTL_TAG_HEAD_OF_QUEUE: 1234 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1235 break; 1236 case CTL_TAG_UNTAGGED: 1237 case CTL_TAG_SIMPLE: 1238 case CTL_TAG_ACA: 1239 default: 1240 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1241 break; 1242 } 1243 1244 /* 1245 * This path handles read and write only. The config write path 1246 * handles flush operations. 1247 */ 1248 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) { 1249 beio->bio_cmd = BIO_READ; 1250 beio->ds_trans_type = DEVSTAT_READ; 1251 } else { 1252 beio->bio_cmd = BIO_WRITE; 1253 beio->ds_trans_type = DEVSTAT_WRITE; 1254 } 1255 1256 beio->io_len = lbalen.len * be_lun->blocksize; 1257 beio->io_offset = lbalen.lba * be_lun->blocksize; 1258 1259 DPRINTF("%s at LBA %jx len %u\n", 1260 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1261 (uintmax_t)lbalen.lba, lbalen.len); 1262 1263 for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS && 1264 len_left > 0; i++) { 1265 1266 /* 1267 * Setup the S/G entry for this chunk. 1268 */ 1269 beio->sg_segs[i].len = min(MAXPHYS, len_left); 1270 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1271 1272 DPRINTF("segment %d addr %p len %zd\n", i, 1273 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1274 1275 beio->num_segs++; 1276 len_left -= beio->sg_segs[i].len; 1277 } 1278 1279 /* 1280 * For the read case, we need to read the data into our buffers and 1281 * then we can send it back to the user. For the write case, we 1282 * need to get the data from the user first. 1283 */ 1284 if (beio->bio_cmd == BIO_READ) { 1285 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1286 be_lun->dispatch(be_lun, beio); 1287 } else { 1288 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1289 io->scsiio.be_move_done = ctl_be_block_move_done; 1290 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1291 io->scsiio.kern_data_len = beio->io_len; 1292 io->scsiio.kern_total_len = beio->io_len; 1293 io->scsiio.kern_rel_offset = 0; 1294 io->scsiio.kern_data_resid = 0; 1295 io->scsiio.kern_sg_entries = beio->num_segs; 1296 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1297 #ifdef CTL_TIME_IO 1298 getbintime(&io->io_hdr.dma_start_bt); 1299 #endif 1300 ctl_datamove(io); 1301 } 1302 } 1303 1304 static void 1305 ctl_be_block_worker(void *context, int pending) 1306 { 1307 struct ctl_be_block_lun *be_lun; 1308 struct ctl_be_block_softc *softc; 1309 union ctl_io *io; 1310 1311 be_lun = (struct ctl_be_block_lun *)context; 1312 softc = be_lun->softc; 1313 1314 DPRINTF("entered\n"); 1315 1316 mtx_lock(&be_lun->lock); 1317 for (;;) { 1318 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1319 if (io != NULL) { 1320 struct ctl_be_block_io *beio; 1321 1322 DPRINTF("datamove queue\n"); 1323 1324 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1325 ctl_io_hdr, links); 1326 1327 mtx_unlock(&be_lun->lock); 1328 1329 beio = (struct ctl_be_block_io *) 1330 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr; 1331 1332 be_lun->dispatch(be_lun, beio); 1333 1334 mtx_lock(&be_lun->lock); 1335 continue; 1336 } 1337 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1338 if (io != NULL) { 1339 1340 DPRINTF("config write queue\n"); 1341 1342 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1343 ctl_io_hdr, links); 1344 1345 mtx_unlock(&be_lun->lock); 1346 1347 ctl_be_block_cw_dispatch(be_lun, io); 1348 1349 mtx_lock(&be_lun->lock); 1350 continue; 1351 } 1352 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1353 if (io != NULL) { 1354 DPRINTF("input queue\n"); 1355 1356 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1357 ctl_io_hdr, links); 1358 mtx_unlock(&be_lun->lock); 1359 1360 /* 1361 * We must drop the lock, since this routine and 1362 * its children may sleep. 1363 */ 1364 ctl_be_block_dispatch(be_lun, io); 1365 1366 mtx_lock(&be_lun->lock); 1367 continue; 1368 } 1369 1370 /* 1371 * If we get here, there is no work left in the queues, so 1372 * just break out and let the task queue go to sleep. 1373 */ 1374 break; 1375 } 1376 mtx_unlock(&be_lun->lock); 1377 } 1378 1379 /* 1380 * Entry point from CTL to the backend for I/O. We queue everything to a 1381 * work thread, so this just puts the I/O on a queue and wakes up the 1382 * thread. 1383 */ 1384 static int 1385 ctl_be_block_submit(union ctl_io *io) 1386 { 1387 struct ctl_be_block_lun *be_lun; 1388 struct ctl_be_lun *ctl_be_lun; 1389 int retval; 1390 1391 DPRINTF("entered\n"); 1392 1393 retval = CTL_RETVAL_COMPLETE; 1394 1395 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1396 CTL_PRIV_BACKEND_LUN].ptr; 1397 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1398 1399 /* 1400 * Make sure we only get SCSI I/O. 1401 */ 1402 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1403 "%#x) encountered", io->io_hdr.io_type)); 1404 1405 mtx_lock(&be_lun->lock); 1406 /* 1407 * XXX KDM make sure that links is okay to use at this point. 1408 * Otherwise, we either need to add another field to ctl_io_hdr, 1409 * or deal with resource allocation here. 1410 */ 1411 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1412 mtx_unlock(&be_lun->lock); 1413 1414 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1415 1416 return (retval); 1417 } 1418 1419 static int 1420 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1421 int flag, struct thread *td) 1422 { 1423 struct ctl_be_block_softc *softc; 1424 int error; 1425 1426 softc = &backend_block_softc; 1427 1428 error = 0; 1429 1430 switch (cmd) { 1431 case CTL_LUN_REQ: { 1432 struct ctl_lun_req *lun_req; 1433 1434 lun_req = (struct ctl_lun_req *)addr; 1435 1436 switch (lun_req->reqtype) { 1437 case CTL_LUNREQ_CREATE: 1438 error = ctl_be_block_create(softc, lun_req); 1439 break; 1440 case CTL_LUNREQ_RM: 1441 error = ctl_be_block_rm(softc, lun_req); 1442 break; 1443 case CTL_LUNREQ_MODIFY: 1444 error = ctl_be_block_modify(softc, lun_req); 1445 break; 1446 default: 1447 lun_req->status = CTL_LUN_ERROR; 1448 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1449 "%s: invalid LUN request type %d", __func__, 1450 lun_req->reqtype); 1451 break; 1452 } 1453 break; 1454 } 1455 default: 1456 error = ENOTTY; 1457 break; 1458 } 1459 1460 return (error); 1461 } 1462 1463 static int 1464 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1465 { 1466 struct ctl_be_block_filedata *file_data; 1467 struct ctl_lun_create_params *params; 1468 struct vattr vattr; 1469 int error; 1470 1471 error = 0; 1472 file_data = &be_lun->backend.file; 1473 params = &req->reqdata.create; 1474 1475 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1476 be_lun->dispatch = ctl_be_block_dispatch_file; 1477 be_lun->lun_flush = ctl_be_block_flush_file; 1478 1479 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1480 if (error != 0) { 1481 snprintf(req->error_str, sizeof(req->error_str), 1482 "error calling VOP_GETATTR() for file %s", 1483 be_lun->dev_path); 1484 return (error); 1485 } 1486 1487 /* 1488 * Verify that we have the ability to upgrade to exclusive 1489 * access on this file so we can trap errors at open instead 1490 * of reporting them during first access. 1491 */ 1492 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1493 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1494 if (be_lun->vn->v_iflag & VI_DOOMED) { 1495 error = EBADF; 1496 snprintf(req->error_str, sizeof(req->error_str), 1497 "error locking file %s", be_lun->dev_path); 1498 return (error); 1499 } 1500 } 1501 1502 1503 file_data->cred = crhold(curthread->td_ucred); 1504 if (params->lun_size_bytes != 0) 1505 be_lun->size_bytes = params->lun_size_bytes; 1506 else 1507 be_lun->size_bytes = vattr.va_size; 1508 /* 1509 * We set the multi thread flag for file operations because all 1510 * filesystems (in theory) are capable of allowing multiple readers 1511 * of a file at once. So we want to get the maximum possible 1512 * concurrency. 1513 */ 1514 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1515 1516 /* 1517 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. 1518 * With ZFS, it is 131072 bytes. Block sizes that large don't work 1519 * with disklabel and UFS on FreeBSD at least. Large block sizes 1520 * may not work with other OSes as well. So just export a sector 1521 * size of 512 bytes, which should work with any OS or 1522 * application. Since our backing is a file, any block size will 1523 * work fine for the backing store. 1524 */ 1525 #if 0 1526 be_lun->blocksize= vattr.va_blocksize; 1527 #endif 1528 if (params->blocksize_bytes != 0) 1529 be_lun->blocksize = params->blocksize_bytes; 1530 else 1531 be_lun->blocksize = 512; 1532 1533 /* 1534 * Sanity check. The media size has to be at least one 1535 * sector long. 1536 */ 1537 if (be_lun->size_bytes < be_lun->blocksize) { 1538 error = EINVAL; 1539 snprintf(req->error_str, sizeof(req->error_str), 1540 "file %s size %ju < block size %u", be_lun->dev_path, 1541 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1542 } 1543 return (error); 1544 } 1545 1546 static int 1547 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1548 { 1549 struct ctl_lun_create_params *params; 1550 struct vattr vattr; 1551 struct cdev *dev; 1552 struct cdevsw *devsw; 1553 int error; 1554 off_t ps, pss, po, pos; 1555 1556 params = &req->reqdata.create; 1557 1558 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1559 be_lun->dispatch = ctl_be_block_dispatch_dev; 1560 be_lun->lun_flush = ctl_be_block_flush_dev; 1561 be_lun->unmap = ctl_be_block_unmap_dev; 1562 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1563 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1564 &be_lun->backend.dev.dev_ref); 1565 if (be_lun->backend.dev.csw == NULL) 1566 panic("Unable to retrieve device switch"); 1567 1568 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1569 if (error) { 1570 snprintf(req->error_str, sizeof(req->error_str), 1571 "%s: error getting vnode attributes for device %s", 1572 __func__, be_lun->dev_path); 1573 return (error); 1574 } 1575 1576 dev = be_lun->vn->v_rdev; 1577 devsw = dev->si_devsw; 1578 if (!devsw->d_ioctl) { 1579 snprintf(req->error_str, sizeof(req->error_str), 1580 "%s: no d_ioctl for device %s!", __func__, 1581 be_lun->dev_path); 1582 return (ENODEV); 1583 } 1584 1585 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1586 (caddr_t)&be_lun->blocksize, FREAD, 1587 curthread); 1588 if (error) { 1589 snprintf(req->error_str, sizeof(req->error_str), 1590 "%s: error %d returned for DIOCGSECTORSIZE ioctl " 1591 "on %s!", __func__, error, be_lun->dev_path); 1592 return (error); 1593 } 1594 1595 /* 1596 * If the user has asked for a blocksize that is greater than the 1597 * backing device's blocksize, we can do it only if the blocksize 1598 * the user is asking for is an even multiple of the underlying 1599 * device's blocksize. 1600 */ 1601 if ((params->blocksize_bytes != 0) 1602 && (params->blocksize_bytes > be_lun->blocksize)) { 1603 uint32_t bs_multiple, tmp_blocksize; 1604 1605 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1606 1607 tmp_blocksize = bs_multiple * be_lun->blocksize; 1608 1609 if (tmp_blocksize == params->blocksize_bytes) { 1610 be_lun->blocksize = params->blocksize_bytes; 1611 } else { 1612 snprintf(req->error_str, sizeof(req->error_str), 1613 "%s: requested blocksize %u is not an even " 1614 "multiple of backing device blocksize %u", 1615 __func__, params->blocksize_bytes, 1616 be_lun->blocksize); 1617 return (EINVAL); 1618 1619 } 1620 } else if ((params->blocksize_bytes != 0) 1621 && (params->blocksize_bytes != be_lun->blocksize)) { 1622 snprintf(req->error_str, sizeof(req->error_str), 1623 "%s: requested blocksize %u < backing device " 1624 "blocksize %u", __func__, params->blocksize_bytes, 1625 be_lun->blocksize); 1626 return (EINVAL); 1627 } 1628 1629 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1630 (caddr_t)&be_lun->size_bytes, FREAD, 1631 curthread); 1632 if (error) { 1633 snprintf(req->error_str, sizeof(req->error_str), 1634 "%s: error %d returned for DIOCGMEDIASIZE " 1635 " ioctl on %s!", __func__, error, 1636 be_lun->dev_path); 1637 return (error); 1638 } 1639 1640 if (params->lun_size_bytes != 0) { 1641 if (params->lun_size_bytes > be_lun->size_bytes) { 1642 snprintf(req->error_str, sizeof(req->error_str), 1643 "%s: requested LUN size %ju > backing device " 1644 "size %ju", __func__, 1645 (uintmax_t)params->lun_size_bytes, 1646 (uintmax_t)be_lun->size_bytes); 1647 return (EINVAL); 1648 } 1649 1650 be_lun->size_bytes = params->lun_size_bytes; 1651 } 1652 1653 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1654 (caddr_t)&ps, FREAD, curthread); 1655 if (error) 1656 ps = po = 0; 1657 else { 1658 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1659 (caddr_t)&po, FREAD, curthread); 1660 if (error) 1661 po = 0; 1662 } 1663 pss = ps / be_lun->blocksize; 1664 pos = po / be_lun->blocksize; 1665 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1666 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1667 be_lun->pblockexp = fls(pss) - 1; 1668 be_lun->pblockoff = (pss - pos) % pss; 1669 } 1670 1671 return (0); 1672 } 1673 1674 static int 1675 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1676 { 1677 DROP_GIANT(); 1678 if (be_lun->vn) { 1679 int flags = FREAD | FWRITE; 1680 1681 switch (be_lun->dev_type) { 1682 case CTL_BE_BLOCK_DEV: 1683 if (be_lun->backend.dev.csw) { 1684 dev_relthread(be_lun->backend.dev.cdev, 1685 be_lun->backend.dev.dev_ref); 1686 be_lun->backend.dev.csw = NULL; 1687 be_lun->backend.dev.cdev = NULL; 1688 } 1689 break; 1690 case CTL_BE_BLOCK_FILE: 1691 break; 1692 case CTL_BE_BLOCK_NONE: 1693 break; 1694 default: 1695 panic("Unexpected backend type."); 1696 break; 1697 } 1698 1699 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1700 be_lun->vn = NULL; 1701 1702 switch (be_lun->dev_type) { 1703 case CTL_BE_BLOCK_DEV: 1704 break; 1705 case CTL_BE_BLOCK_FILE: 1706 if (be_lun->backend.file.cred != NULL) { 1707 crfree(be_lun->backend.file.cred); 1708 be_lun->backend.file.cred = NULL; 1709 } 1710 break; 1711 case CTL_BE_BLOCK_NONE: 1712 break; 1713 default: 1714 panic("Unexpected backend type."); 1715 break; 1716 } 1717 } 1718 PICKUP_GIANT(); 1719 1720 return (0); 1721 } 1722 1723 static int 1724 ctl_be_block_open(struct ctl_be_block_softc *softc, 1725 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1726 { 1727 struct nameidata nd; 1728 int flags; 1729 int error; 1730 1731 /* 1732 * XXX KDM allow a read-only option? 1733 */ 1734 flags = FREAD | FWRITE; 1735 error = 0; 1736 1737 if (rootvnode == NULL) { 1738 snprintf(req->error_str, sizeof(req->error_str), 1739 "%s: Root filesystem is not mounted", __func__); 1740 return (1); 1741 } 1742 1743 if (!curthread->td_proc->p_fd->fd_cdir) { 1744 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1745 VREF(rootvnode); 1746 } 1747 if (!curthread->td_proc->p_fd->fd_rdir) { 1748 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1749 VREF(rootvnode); 1750 } 1751 if (!curthread->td_proc->p_fd->fd_jdir) { 1752 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1753 VREF(rootvnode); 1754 } 1755 1756 again: 1757 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1758 error = vn_open(&nd, &flags, 0, NULL); 1759 if (error) { 1760 /* 1761 * This is the only reasonable guess we can make as far as 1762 * path if the user doesn't give us a fully qualified path. 1763 * If they want to specify a file, they need to specify the 1764 * full path. 1765 */ 1766 if (be_lun->dev_path[0] != '/') { 1767 char *dev_path = "/dev/"; 1768 char *dev_name; 1769 1770 /* Try adding device path at beginning of name */ 1771 dev_name = malloc(strlen(be_lun->dev_path) 1772 + strlen(dev_path) + 1, 1773 M_CTLBLK, M_WAITOK); 1774 if (dev_name) { 1775 sprintf(dev_name, "%s%s", dev_path, 1776 be_lun->dev_path); 1777 free(be_lun->dev_path, M_CTLBLK); 1778 be_lun->dev_path = dev_name; 1779 goto again; 1780 } 1781 } 1782 snprintf(req->error_str, sizeof(req->error_str), 1783 "%s: error opening %s", __func__, be_lun->dev_path); 1784 return (error); 1785 } 1786 1787 NDFREE(&nd, NDF_ONLY_PNBUF); 1788 1789 be_lun->vn = nd.ni_vp; 1790 1791 /* We only support disks and files. */ 1792 if (vn_isdisk(be_lun->vn, &error)) { 1793 error = ctl_be_block_open_dev(be_lun, req); 1794 } else if (be_lun->vn->v_type == VREG) { 1795 error = ctl_be_block_open_file(be_lun, req); 1796 } else { 1797 error = EINVAL; 1798 snprintf(req->error_str, sizeof(req->error_str), 1799 "%s is not a disk or plain file", be_lun->dev_path); 1800 } 1801 VOP_UNLOCK(be_lun->vn, 0); 1802 1803 if (error != 0) { 1804 ctl_be_block_close(be_lun); 1805 return (error); 1806 } 1807 1808 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1809 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1810 1811 return (0); 1812 } 1813 1814 static int 1815 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1816 { 1817 struct ctl_be_block_lun *be_lun; 1818 struct ctl_lun_create_params *params; 1819 struct ctl_be_arg *file_arg; 1820 char tmpstr[32]; 1821 int retval, num_threads, unmap; 1822 int i; 1823 1824 params = &req->reqdata.create; 1825 retval = 0; 1826 1827 num_threads = cbb_num_threads; 1828 1829 file_arg = NULL; 1830 1831 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1832 1833 be_lun->softc = softc; 1834 STAILQ_INIT(&be_lun->input_queue); 1835 STAILQ_INIT(&be_lun->config_write_queue); 1836 STAILQ_INIT(&be_lun->datamove_queue); 1837 STAILQ_INIT(&be_lun->ctl_be_lun.options); 1838 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1839 mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF); 1840 1841 be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS, 1842 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1843 1844 if (be_lun->lun_zone == NULL) { 1845 snprintf(req->error_str, sizeof(req->error_str), 1846 "%s: error allocating UMA zone", __func__); 1847 goto bailout_error; 1848 } 1849 1850 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1851 be_lun->ctl_be_lun.lun_type = params->device_type; 1852 else 1853 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1854 1855 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1856 for (i = 0; i < req->num_be_args; i++) { 1857 if (strcmp(req->kern_be_args[i].kname, "file") == 0) { 1858 file_arg = &req->kern_be_args[i]; 1859 break; 1860 } 1861 } 1862 1863 if (file_arg == NULL) { 1864 snprintf(req->error_str, sizeof(req->error_str), 1865 "%s: no file argument specified", __func__); 1866 goto bailout_error; 1867 } 1868 1869 be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK, 1870 M_WAITOK | M_ZERO); 1871 1872 strlcpy(be_lun->dev_path, (char *)file_arg->kvalue, 1873 file_arg->vallen); 1874 1875 retval = ctl_be_block_open(softc, be_lun, req); 1876 if (retval != 0) { 1877 retval = 0; 1878 goto bailout_error; 1879 } 1880 1881 /* 1882 * Tell the user the size of the file/device. 1883 */ 1884 params->lun_size_bytes = be_lun->size_bytes; 1885 1886 /* 1887 * The maximum LBA is the size - 1. 1888 */ 1889 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 1890 } else { 1891 /* 1892 * For processor devices, we don't have any size. 1893 */ 1894 be_lun->blocksize = 0; 1895 be_lun->pblockexp = 0; 1896 be_lun->pblockoff = 0; 1897 be_lun->size_blocks = 0; 1898 be_lun->size_bytes = 0; 1899 be_lun->ctl_be_lun.maxlba = 0; 1900 params->lun_size_bytes = 0; 1901 1902 /* 1903 * Default to just 1 thread for processor devices. 1904 */ 1905 num_threads = 1; 1906 } 1907 1908 /* 1909 * XXX This searching loop might be refactored to be combined with 1910 * the loop above, 1911 */ 1912 unmap = 0; 1913 for (i = 0; i < req->num_be_args; i++) { 1914 if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) { 1915 struct ctl_be_arg *thread_arg; 1916 char num_thread_str[16]; 1917 int tmp_num_threads; 1918 1919 1920 thread_arg = &req->kern_be_args[i]; 1921 1922 strlcpy(num_thread_str, (char *)thread_arg->kvalue, 1923 min(thread_arg->vallen, 1924 sizeof(num_thread_str))); 1925 1926 tmp_num_threads = strtol(num_thread_str, NULL, 0); 1927 1928 /* 1929 * We don't let the user specify less than one 1930 * thread, but hope he's clueful enough not to 1931 * specify 1000 threads. 1932 */ 1933 if (tmp_num_threads < 1) { 1934 snprintf(req->error_str, sizeof(req->error_str), 1935 "%s: invalid number of threads %s", 1936 __func__, num_thread_str); 1937 goto bailout_error; 1938 } 1939 1940 num_threads = tmp_num_threads; 1941 } else if (strcmp(req->kern_be_args[i].kname, "unmap") == 0 && 1942 strcmp(req->kern_be_args[i].kvalue, "on") == 0) { 1943 unmap = 1; 1944 } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 && 1945 strcmp(req->kern_be_args[i].kname, "dev") != 0) { 1946 struct ctl_be_lun_option *opt; 1947 1948 opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK); 1949 opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_CTLBLK, M_WAITOK); 1950 strcpy(opt->name, req->kern_be_args[i].kname); 1951 opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_CTLBLK, M_WAITOK); 1952 strcpy(opt->value, req->kern_be_args[i].kvalue); 1953 STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links); 1954 } 1955 } 1956 1957 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 1958 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 1959 if (unmap) 1960 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 1961 be_lun->ctl_be_lun.be_lun = be_lun; 1962 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 1963 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 1964 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 1965 /* Tell the user the blocksize we ended up using */ 1966 params->blocksize_bytes = be_lun->blocksize; 1967 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 1968 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 1969 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 1970 } else 1971 be_lun->ctl_be_lun.req_lun_id = 0; 1972 1973 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 1974 be_lun->ctl_be_lun.lun_config_status = 1975 ctl_be_block_lun_config_status; 1976 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 1977 1978 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 1979 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 1980 softc->num_luns); 1981 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 1982 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 1983 sizeof(tmpstr))); 1984 1985 /* Tell the user what we used for a serial number */ 1986 strncpy((char *)params->serial_num, tmpstr, 1987 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 1988 } else { 1989 strncpy((char *)be_lun->ctl_be_lun.serial_num, 1990 params->serial_num, 1991 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 1992 sizeof(params->serial_num))); 1993 } 1994 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 1995 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 1996 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 1997 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 1998 sizeof(tmpstr))); 1999 2000 /* Tell the user what we used for a device ID */ 2001 strncpy((char *)params->device_id, tmpstr, 2002 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2003 } else { 2004 strncpy((char *)be_lun->ctl_be_lun.device_id, 2005 params->device_id, 2006 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2007 sizeof(params->device_id))); 2008 } 2009 2010 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2011 2012 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2013 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2014 2015 if (be_lun->io_taskqueue == NULL) { 2016 snprintf(req->error_str, sizeof(req->error_str), 2017 "%s: Unable to create taskqueue", __func__); 2018 goto bailout_error; 2019 } 2020 2021 /* 2022 * Note that we start the same number of threads by default for 2023 * both the file case and the block device case. For the file 2024 * case, we need multiple threads to allow concurrency, because the 2025 * vnode interface is designed to be a blocking interface. For the 2026 * block device case, ZFS zvols at least will block the caller's 2027 * context in many instances, and so we need multiple threads to 2028 * overcome that problem. Other block devices don't need as many 2029 * threads, but they shouldn't cause too many problems. 2030 * 2031 * If the user wants to just have a single thread for a block 2032 * device, he can specify that when the LUN is created, or change 2033 * the tunable/sysctl to alter the default number of threads. 2034 */ 2035 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2036 /*num threads*/num_threads, 2037 /*priority*/PWAIT, 2038 /*thread name*/ 2039 "%s taskq", be_lun->lunname); 2040 2041 if (retval != 0) 2042 goto bailout_error; 2043 2044 be_lun->num_threads = num_threads; 2045 2046 mtx_lock(&softc->lock); 2047 softc->num_luns++; 2048 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2049 2050 mtx_unlock(&softc->lock); 2051 2052 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2053 if (retval != 0) { 2054 mtx_lock(&softc->lock); 2055 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2056 links); 2057 softc->num_luns--; 2058 mtx_unlock(&softc->lock); 2059 snprintf(req->error_str, sizeof(req->error_str), 2060 "%s: ctl_add_lun() returned error %d, see dmesg for " 2061 "details", __func__, retval); 2062 retval = 0; 2063 goto bailout_error; 2064 } 2065 2066 mtx_lock(&softc->lock); 2067 2068 /* 2069 * Tell the config_status routine that we're waiting so it won't 2070 * clean up the LUN in the event of an error. 2071 */ 2072 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2073 2074 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2075 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2076 if (retval == EINTR) 2077 break; 2078 } 2079 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2080 2081 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2082 snprintf(req->error_str, sizeof(req->error_str), 2083 "%s: LUN configuration error, see dmesg for details", 2084 __func__); 2085 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2086 links); 2087 softc->num_luns--; 2088 mtx_unlock(&softc->lock); 2089 goto bailout_error; 2090 } else { 2091 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2092 } 2093 2094 mtx_unlock(&softc->lock); 2095 2096 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2097 be_lun->blocksize, 2098 DEVSTAT_ALL_SUPPORTED, 2099 be_lun->ctl_be_lun.lun_type 2100 | DEVSTAT_TYPE_IF_OTHER, 2101 DEVSTAT_PRIORITY_OTHER); 2102 2103 2104 req->status = CTL_LUN_OK; 2105 2106 return (retval); 2107 2108 bailout_error: 2109 req->status = CTL_LUN_ERROR; 2110 2111 ctl_be_block_close(be_lun); 2112 2113 free(be_lun->dev_path, M_CTLBLK); 2114 free(be_lun, M_CTLBLK); 2115 2116 return (retval); 2117 } 2118 2119 static int 2120 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2121 { 2122 struct ctl_lun_rm_params *params; 2123 struct ctl_be_block_lun *be_lun; 2124 int retval; 2125 2126 params = &req->reqdata.rm; 2127 2128 mtx_lock(&softc->lock); 2129 2130 be_lun = NULL; 2131 2132 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2133 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2134 break; 2135 } 2136 mtx_unlock(&softc->lock); 2137 2138 if (be_lun == NULL) { 2139 snprintf(req->error_str, sizeof(req->error_str), 2140 "%s: LUN %u is not managed by the block backend", 2141 __func__, params->lun_id); 2142 goto bailout_error; 2143 } 2144 2145 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2146 2147 if (retval != 0) { 2148 snprintf(req->error_str, sizeof(req->error_str), 2149 "%s: error %d returned from ctl_disable_lun() for " 2150 "LUN %d", __func__, retval, params->lun_id); 2151 goto bailout_error; 2152 2153 } 2154 2155 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2156 if (retval != 0) { 2157 snprintf(req->error_str, sizeof(req->error_str), 2158 "%s: error %d returned from ctl_invalidate_lun() for " 2159 "LUN %d", __func__, retval, params->lun_id); 2160 goto bailout_error; 2161 } 2162 2163 mtx_lock(&softc->lock); 2164 2165 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2166 2167 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2168 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2169 if (retval == EINTR) 2170 break; 2171 } 2172 2173 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2174 2175 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2176 snprintf(req->error_str, sizeof(req->error_str), 2177 "%s: interrupted waiting for LUN to be freed", 2178 __func__); 2179 mtx_unlock(&softc->lock); 2180 goto bailout_error; 2181 } 2182 2183 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2184 2185 softc->num_luns--; 2186 mtx_unlock(&softc->lock); 2187 2188 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2189 2190 taskqueue_free(be_lun->io_taskqueue); 2191 2192 ctl_be_block_close(be_lun); 2193 2194 if (be_lun->disk_stats != NULL) 2195 devstat_remove_entry(be_lun->disk_stats); 2196 2197 uma_zdestroy(be_lun->lun_zone); 2198 2199 free(be_lun->dev_path, M_CTLBLK); 2200 2201 free(be_lun, M_CTLBLK); 2202 2203 req->status = CTL_LUN_OK; 2204 2205 return (0); 2206 2207 bailout_error: 2208 2209 req->status = CTL_LUN_ERROR; 2210 2211 return (0); 2212 } 2213 2214 static int 2215 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2216 struct ctl_lun_req *req) 2217 { 2218 struct vattr vattr; 2219 int error; 2220 struct ctl_lun_modify_params *params; 2221 2222 params = &req->reqdata.modify; 2223 2224 if (params->lun_size_bytes != 0) { 2225 be_lun->size_bytes = params->lun_size_bytes; 2226 } else { 2227 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2228 if (error != 0) { 2229 snprintf(req->error_str, sizeof(req->error_str), 2230 "error calling VOP_GETATTR() for file %s", 2231 be_lun->dev_path); 2232 return (error); 2233 } 2234 2235 be_lun->size_bytes = vattr.va_size; 2236 } 2237 2238 return (0); 2239 } 2240 2241 static int 2242 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2243 struct ctl_lun_req *req) 2244 { 2245 struct cdev *dev; 2246 struct cdevsw *devsw; 2247 int error; 2248 struct ctl_lun_modify_params *params; 2249 uint64_t size_bytes; 2250 2251 params = &req->reqdata.modify; 2252 2253 dev = be_lun->vn->v_rdev; 2254 devsw = dev->si_devsw; 2255 if (!devsw->d_ioctl) { 2256 snprintf(req->error_str, sizeof(req->error_str), 2257 "%s: no d_ioctl for device %s!", __func__, 2258 be_lun->dev_path); 2259 return (ENODEV); 2260 } 2261 2262 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 2263 (caddr_t)&size_bytes, FREAD, 2264 curthread); 2265 if (error) { 2266 snprintf(req->error_str, sizeof(req->error_str), 2267 "%s: error %d returned for DIOCGMEDIASIZE ioctl " 2268 "on %s!", __func__, error, be_lun->dev_path); 2269 return (error); 2270 } 2271 2272 if (params->lun_size_bytes != 0) { 2273 if (params->lun_size_bytes > size_bytes) { 2274 snprintf(req->error_str, sizeof(req->error_str), 2275 "%s: requested LUN size %ju > backing device " 2276 "size %ju", __func__, 2277 (uintmax_t)params->lun_size_bytes, 2278 (uintmax_t)size_bytes); 2279 return (EINVAL); 2280 } 2281 2282 be_lun->size_bytes = params->lun_size_bytes; 2283 } else { 2284 be_lun->size_bytes = size_bytes; 2285 } 2286 2287 return (0); 2288 } 2289 2290 static int 2291 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2292 { 2293 struct ctl_lun_modify_params *params; 2294 struct ctl_be_block_lun *be_lun; 2295 int error; 2296 2297 params = &req->reqdata.modify; 2298 2299 mtx_lock(&softc->lock); 2300 2301 be_lun = NULL; 2302 2303 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2304 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2305 break; 2306 } 2307 mtx_unlock(&softc->lock); 2308 2309 if (be_lun == NULL) { 2310 snprintf(req->error_str, sizeof(req->error_str), 2311 "%s: LUN %u is not managed by the block backend", 2312 __func__, params->lun_id); 2313 goto bailout_error; 2314 } 2315 2316 if (params->lun_size_bytes != 0) { 2317 if (params->lun_size_bytes < be_lun->blocksize) { 2318 snprintf(req->error_str, sizeof(req->error_str), 2319 "%s: LUN size %ju < blocksize %u", __func__, 2320 params->lun_size_bytes, be_lun->blocksize); 2321 goto bailout_error; 2322 } 2323 } 2324 2325 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2326 2327 if (be_lun->vn->v_type == VREG) 2328 error = ctl_be_block_modify_file(be_lun, req); 2329 else 2330 error = ctl_be_block_modify_dev(be_lun, req); 2331 2332 VOP_UNLOCK(be_lun->vn, 0); 2333 2334 if (error != 0) 2335 goto bailout_error; 2336 2337 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 2338 2339 /* 2340 * The maximum LBA is the size - 1. 2341 * 2342 * XXX: Note that this field is being updated without locking, 2343 * which might cause problems on 32-bit architectures. 2344 */ 2345 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 2346 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2347 2348 /* Tell the user the exact size we ended up using */ 2349 params->lun_size_bytes = be_lun->size_bytes; 2350 2351 req->status = CTL_LUN_OK; 2352 2353 return (0); 2354 2355 bailout_error: 2356 req->status = CTL_LUN_ERROR; 2357 2358 return (0); 2359 } 2360 2361 static void 2362 ctl_be_block_lun_shutdown(void *be_lun) 2363 { 2364 struct ctl_be_block_lun *lun; 2365 struct ctl_be_block_softc *softc; 2366 2367 lun = (struct ctl_be_block_lun *)be_lun; 2368 2369 softc = lun->softc; 2370 2371 mtx_lock(&softc->lock); 2372 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2373 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2374 wakeup(lun); 2375 mtx_unlock(&softc->lock); 2376 2377 } 2378 2379 static void 2380 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2381 { 2382 struct ctl_be_block_lun *lun; 2383 struct ctl_be_block_softc *softc; 2384 2385 lun = (struct ctl_be_block_lun *)be_lun; 2386 softc = lun->softc; 2387 2388 if (status == CTL_LUN_CONFIG_OK) { 2389 mtx_lock(&softc->lock); 2390 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2391 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2392 wakeup(lun); 2393 mtx_unlock(&softc->lock); 2394 2395 /* 2396 * We successfully added the LUN, attempt to enable it. 2397 */ 2398 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2399 printf("%s: ctl_enable_lun() failed!\n", __func__); 2400 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2401 printf("%s: ctl_invalidate_lun() failed!\n", 2402 __func__); 2403 } 2404 } 2405 2406 return; 2407 } 2408 2409 2410 mtx_lock(&softc->lock); 2411 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2412 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2413 wakeup(lun); 2414 mtx_unlock(&softc->lock); 2415 } 2416 2417 2418 static int 2419 ctl_be_block_config_write(union ctl_io *io) 2420 { 2421 struct ctl_be_block_lun *be_lun; 2422 struct ctl_be_lun *ctl_be_lun; 2423 int retval; 2424 2425 retval = 0; 2426 2427 DPRINTF("entered\n"); 2428 2429 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2430 CTL_PRIV_BACKEND_LUN].ptr; 2431 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2432 2433 switch (io->scsiio.cdb[0]) { 2434 case SYNCHRONIZE_CACHE: 2435 case SYNCHRONIZE_CACHE_16: 2436 case WRITE_SAME_10: 2437 case WRITE_SAME_16: 2438 case UNMAP: 2439 /* 2440 * The upper level CTL code will filter out any CDBs with 2441 * the immediate bit set and return the proper error. 2442 * 2443 * We don't really need to worry about what LBA range the 2444 * user asked to be synced out. When they issue a sync 2445 * cache command, we'll sync out the whole thing. 2446 */ 2447 mtx_lock(&be_lun->lock); 2448 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2449 links); 2450 mtx_unlock(&be_lun->lock); 2451 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2452 break; 2453 case START_STOP_UNIT: { 2454 struct scsi_start_stop_unit *cdb; 2455 2456 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2457 2458 if (cdb->how & SSS_START) 2459 retval = ctl_start_lun(ctl_be_lun); 2460 else { 2461 retval = ctl_stop_lun(ctl_be_lun); 2462 /* 2463 * XXX KDM Copan-specific offline behavior. 2464 * Figure out a reasonable way to port this? 2465 */ 2466 #ifdef NEEDTOPORT 2467 if ((retval == 0) 2468 && (cdb->byte2 & SSS_ONOFFLINE)) 2469 retval = ctl_lun_offline(ctl_be_lun); 2470 #endif 2471 } 2472 2473 /* 2474 * In general, the above routines should not fail. They 2475 * just set state for the LUN. So we've got something 2476 * pretty wrong here if we can't start or stop the LUN. 2477 */ 2478 if (retval != 0) { 2479 ctl_set_internal_failure(&io->scsiio, 2480 /*sks_valid*/ 1, 2481 /*retry_count*/ 0xf051); 2482 retval = CTL_RETVAL_COMPLETE; 2483 } else { 2484 ctl_set_success(&io->scsiio); 2485 } 2486 ctl_config_write_done(io); 2487 break; 2488 } 2489 default: 2490 ctl_set_invalid_opcode(&io->scsiio); 2491 ctl_config_write_done(io); 2492 retval = CTL_RETVAL_COMPLETE; 2493 break; 2494 } 2495 2496 return (retval); 2497 2498 } 2499 2500 static int 2501 ctl_be_block_config_read(union ctl_io *io) 2502 { 2503 return (0); 2504 } 2505 2506 static int 2507 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2508 { 2509 struct ctl_be_block_lun *lun; 2510 int retval; 2511 2512 lun = (struct ctl_be_block_lun *)be_lun; 2513 retval = 0; 2514 2515 retval = sbuf_printf(sb, "<num_threads>"); 2516 2517 if (retval != 0) 2518 goto bailout; 2519 2520 retval = sbuf_printf(sb, "%d", lun->num_threads); 2521 2522 if (retval != 0) 2523 goto bailout; 2524 2525 retval = sbuf_printf(sb, "</num_threads>"); 2526 2527 /* 2528 * For processor devices, we don't have a path variable. 2529 */ 2530 if ((retval != 0) 2531 || (lun->dev_path == NULL)) 2532 goto bailout; 2533 2534 retval = sbuf_printf(sb, "<file>"); 2535 2536 if (retval != 0) 2537 goto bailout; 2538 2539 retval = ctl_sbuf_printf_esc(sb, lun->dev_path); 2540 2541 if (retval != 0) 2542 goto bailout; 2543 2544 retval = sbuf_printf(sb, "</file>\n"); 2545 2546 bailout: 2547 2548 return (retval); 2549 } 2550 2551 int 2552 ctl_be_block_init(void) 2553 { 2554 struct ctl_be_block_softc *softc; 2555 int retval; 2556 2557 softc = &backend_block_softc; 2558 retval = 0; 2559 2560 mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF); 2561 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2562 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2563 STAILQ_INIT(&softc->disk_list); 2564 STAILQ_INIT(&softc->lun_list); 2565 2566 return (retval); 2567 } 2568