1 /*- 2 * Copyright (c) 2003 Silicon Graphics International Corp. 3 * Copyright (c) 2009-2011 Spectra Logic Corporation 4 * Copyright (c) 2012 The FreeBSD Foundation 5 * All rights reserved. 6 * 7 * Portions of this software were developed by Edward Tomasz Napierala 8 * under sponsorship from the FreeBSD Foundation. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions, and the following disclaimer, 15 * without modification. 16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 17 * substantially similar to the "NO WARRANTY" disclaimer below 18 * ("Disclaimer") and any redistribution must be conditioned upon 19 * including a substantially similar Disclaimer requirement for further 20 * binary redistribution. 21 * 22 * NO WARRANTY 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 * POSSIBILITY OF SUCH DAMAGES. 34 * 35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $ 36 */ 37 /* 38 * CAM Target Layer driver backend for block devices. 39 * 40 * Author: Ken Merry <ken@FreeBSD.org> 41 */ 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/types.h> 49 #include <sys/kthread.h> 50 #include <sys/bio.h> 51 #include <sys/fcntl.h> 52 #include <sys/limits.h> 53 #include <sys/lock.h> 54 #include <sys/mutex.h> 55 #include <sys/condvar.h> 56 #include <sys/malloc.h> 57 #include <sys/conf.h> 58 #include <sys/ioccom.h> 59 #include <sys/queue.h> 60 #include <sys/sbuf.h> 61 #include <sys/endian.h> 62 #include <sys/uio.h> 63 #include <sys/buf.h> 64 #include <sys/taskqueue.h> 65 #include <sys/vnode.h> 66 #include <sys/namei.h> 67 #include <sys/mount.h> 68 #include <sys/disk.h> 69 #include <sys/fcntl.h> 70 #include <sys/filedesc.h> 71 #include <sys/proc.h> 72 #include <sys/pcpu.h> 73 #include <sys/module.h> 74 #include <sys/sdt.h> 75 #include <sys/devicestat.h> 76 #include <sys/sysctl.h> 77 78 #include <geom/geom.h> 79 80 #include <cam/cam.h> 81 #include <cam/scsi/scsi_all.h> 82 #include <cam/scsi/scsi_da.h> 83 #include <cam/ctl/ctl_io.h> 84 #include <cam/ctl/ctl.h> 85 #include <cam/ctl/ctl_backend.h> 86 #include <cam/ctl/ctl_frontend_internal.h> 87 #include <cam/ctl/ctl_ioctl.h> 88 #include <cam/ctl/ctl_scsi_all.h> 89 #include <cam/ctl/ctl_error.h> 90 91 /* 92 * The idea here is that we'll allocate enough S/G space to hold a 1MB 93 * I/O. If we get an I/O larger than that, we'll split it. 94 */ 95 #define CTLBLK_HALF_IO_SIZE (512 * 1024) 96 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2) 97 #define CTLBLK_MAX_SEG MAXPHYS 98 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1) 99 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2) 100 101 #ifdef CTLBLK_DEBUG 102 #define DPRINTF(fmt, args...) \ 103 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) 104 #else 105 #define DPRINTF(fmt, args...) do {} while(0) 106 #endif 107 108 #define PRIV(io) \ 109 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND]) 110 #define ARGS(io) \ 111 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]) 112 113 SDT_PROVIDER_DEFINE(cbb); 114 115 typedef enum { 116 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01, 117 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02, 118 CTL_BE_BLOCK_LUN_WAITING = 0x04, 119 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08 120 } ctl_be_block_lun_flags; 121 122 typedef enum { 123 CTL_BE_BLOCK_NONE, 124 CTL_BE_BLOCK_DEV, 125 CTL_BE_BLOCK_FILE 126 } ctl_be_block_type; 127 128 struct ctl_be_block_devdata { 129 struct cdev *cdev; 130 struct cdevsw *csw; 131 int dev_ref; 132 }; 133 134 struct ctl_be_block_filedata { 135 struct ucred *cred; 136 }; 137 138 union ctl_be_block_bedata { 139 struct ctl_be_block_devdata dev; 140 struct ctl_be_block_filedata file; 141 }; 142 143 struct ctl_be_block_io; 144 struct ctl_be_block_lun; 145 146 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun, 147 struct ctl_be_block_io *beio); 148 149 /* 150 * Backend LUN structure. There is a 1:1 mapping between a block device 151 * and a backend block LUN, and between a backend block LUN and a CTL LUN. 152 */ 153 struct ctl_be_block_lun { 154 struct ctl_lun_create_params params; 155 struct ctl_block_disk *disk; 156 char lunname[32]; 157 char *dev_path; 158 ctl_be_block_type dev_type; 159 struct vnode *vn; 160 union ctl_be_block_bedata backend; 161 cbb_dispatch_t dispatch; 162 cbb_dispatch_t lun_flush; 163 cbb_dispatch_t unmap; 164 uma_zone_t lun_zone; 165 uint64_t size_blocks; 166 uint64_t size_bytes; 167 uint32_t blocksize; 168 int blocksize_shift; 169 uint16_t pblockexp; 170 uint16_t pblockoff; 171 struct ctl_be_block_softc *softc; 172 struct devstat *disk_stats; 173 ctl_be_block_lun_flags flags; 174 STAILQ_ENTRY(ctl_be_block_lun) links; 175 struct ctl_be_lun ctl_be_lun; 176 struct taskqueue *io_taskqueue; 177 struct task io_task; 178 int num_threads; 179 STAILQ_HEAD(, ctl_io_hdr) input_queue; 180 STAILQ_HEAD(, ctl_io_hdr) config_write_queue; 181 STAILQ_HEAD(, ctl_io_hdr) datamove_queue; 182 struct mtx_padalign io_lock; 183 struct mtx_padalign queue_lock; 184 }; 185 186 /* 187 * Overall softc structure for the block backend module. 188 */ 189 struct ctl_be_block_softc { 190 struct mtx lock; 191 int num_disks; 192 STAILQ_HEAD(, ctl_block_disk) disk_list; 193 int num_luns; 194 STAILQ_HEAD(, ctl_be_block_lun) lun_list; 195 }; 196 197 static struct ctl_be_block_softc backend_block_softc; 198 199 /* 200 * Per-I/O information. 201 */ 202 struct ctl_be_block_io { 203 union ctl_io *io; 204 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS]; 205 struct iovec xiovecs[CTLBLK_MAX_SEGS]; 206 int bio_cmd; 207 int num_segs; 208 int num_bios_sent; 209 int num_bios_done; 210 int send_complete; 211 int num_errors; 212 struct bintime ds_t0; 213 devstat_tag_type ds_tag_type; 214 devstat_trans_flags ds_trans_type; 215 uint64_t io_len; 216 uint64_t io_offset; 217 struct ctl_be_block_softc *softc; 218 struct ctl_be_block_lun *lun; 219 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */ 220 }; 221 222 static int cbb_num_threads = 14; 223 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0, 224 "CAM Target Layer Block Backend"); 225 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN, 226 &cbb_num_threads, 0, "Number of threads per backing file"); 227 228 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc); 229 static void ctl_free_beio(struct ctl_be_block_io *beio); 230 static void ctl_complete_beio(struct ctl_be_block_io *beio); 231 static int ctl_be_block_move_done(union ctl_io *io); 232 static void ctl_be_block_biodone(struct bio *bio); 233 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 234 struct ctl_be_block_io *beio); 235 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 236 struct ctl_be_block_io *beio); 237 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 238 struct ctl_be_block_io *beio); 239 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 240 struct ctl_be_block_io *beio); 241 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 242 struct ctl_be_block_io *beio); 243 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 244 union ctl_io *io); 245 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 246 union ctl_io *io); 247 static void ctl_be_block_worker(void *context, int pending); 248 static int ctl_be_block_submit(union ctl_io *io); 249 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 250 int flag, struct thread *td); 251 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, 252 struct ctl_lun_req *req); 253 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, 254 struct ctl_lun_req *req); 255 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun); 256 static int ctl_be_block_open(struct ctl_be_block_softc *softc, 257 struct ctl_be_block_lun *be_lun, 258 struct ctl_lun_req *req); 259 static int ctl_be_block_create(struct ctl_be_block_softc *softc, 260 struct ctl_lun_req *req); 261 static int ctl_be_block_rm(struct ctl_be_block_softc *softc, 262 struct ctl_lun_req *req); 263 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 264 struct ctl_lun_req *req); 265 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 266 struct ctl_lun_req *req); 267 static int ctl_be_block_modify(struct ctl_be_block_softc *softc, 268 struct ctl_lun_req *req); 269 static void ctl_be_block_lun_shutdown(void *be_lun); 270 static void ctl_be_block_lun_config_status(void *be_lun, 271 ctl_lun_config_status status); 272 static int ctl_be_block_config_write(union ctl_io *io); 273 static int ctl_be_block_config_read(union ctl_io *io); 274 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb); 275 int ctl_be_block_init(void); 276 277 static struct ctl_backend_driver ctl_be_block_driver = 278 { 279 .name = "block", 280 .flags = CTL_BE_FLAG_HAS_CONFIG, 281 .init = ctl_be_block_init, 282 .data_submit = ctl_be_block_submit, 283 .data_move_done = ctl_be_block_move_done, 284 .config_read = ctl_be_block_config_read, 285 .config_write = ctl_be_block_config_write, 286 .ioctl = ctl_be_block_ioctl, 287 .lun_info = ctl_be_block_lun_info 288 }; 289 290 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend"); 291 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver); 292 293 static uma_zone_t beio_zone; 294 295 static struct ctl_be_block_io * 296 ctl_alloc_beio(struct ctl_be_block_softc *softc) 297 { 298 struct ctl_be_block_io *beio; 299 300 beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO); 301 beio->softc = softc; 302 return (beio); 303 } 304 305 static void 306 ctl_free_beio(struct ctl_be_block_io *beio) 307 { 308 int duplicate_free; 309 int i; 310 311 duplicate_free = 0; 312 313 for (i = 0; i < beio->num_segs; i++) { 314 if (beio->sg_segs[i].addr == NULL) 315 duplicate_free++; 316 317 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr); 318 beio->sg_segs[i].addr = NULL; 319 320 /* For compare we had two equal S/G lists. */ 321 if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) { 322 uma_zfree(beio->lun->lun_zone, 323 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr); 324 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL; 325 } 326 } 327 328 if (duplicate_free > 0) { 329 printf("%s: %d duplicate frees out of %d segments\n", __func__, 330 duplicate_free, beio->num_segs); 331 } 332 333 uma_zfree(beio_zone, beio); 334 } 335 336 static void 337 ctl_complete_beio(struct ctl_be_block_io *beio) 338 { 339 union ctl_io *io = beio->io; 340 341 if (beio->beio_cont != NULL) { 342 beio->beio_cont(beio); 343 } else { 344 ctl_free_beio(beio); 345 ctl_data_submit_done(io); 346 } 347 } 348 349 static int 350 ctl_be_block_move_done(union ctl_io *io) 351 { 352 struct ctl_be_block_io *beio; 353 struct ctl_be_block_lun *be_lun; 354 struct ctl_lba_len_flags *lbalen; 355 #ifdef CTL_TIME_IO 356 struct bintime cur_bt; 357 #endif 358 int i; 359 360 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 361 be_lun = beio->lun; 362 363 DPRINTF("entered\n"); 364 365 #ifdef CTL_TIME_IO 366 getbintime(&cur_bt); 367 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 368 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 369 io->io_hdr.num_dmas++; 370 #endif 371 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len; 372 373 /* 374 * We set status at this point for read commands, and write 375 * commands with errors. 376 */ 377 if ((io->io_hdr.port_status == 0) && 378 ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) && 379 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 380 lbalen = ARGS(beio->io); 381 if (lbalen->flags & CTL_LLF_READ) { 382 ctl_set_success(&io->scsiio); 383 } else if (lbalen->flags & CTL_LLF_COMPARE) { 384 /* We have two data blocks ready for comparison. */ 385 for (i = 0; i < beio->num_segs; i++) { 386 if (memcmp(beio->sg_segs[i].addr, 387 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr, 388 beio->sg_segs[i].len) != 0) 389 break; 390 } 391 if (i < beio->num_segs) 392 ctl_set_sense(&io->scsiio, 393 /*current_error*/ 1, 394 /*sense_key*/ SSD_KEY_MISCOMPARE, 395 /*asc*/ 0x1D, 396 /*ascq*/ 0x00, 397 SSD_ELEM_NONE); 398 else 399 ctl_set_success(&io->scsiio); 400 } 401 } 402 else if ((io->io_hdr.port_status != 0) 403 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 404 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) { 405 /* 406 * For hardware error sense keys, the sense key 407 * specific value is defined to be a retry count, 408 * but we use it to pass back an internal FETD 409 * error code. XXX KDM Hopefully the FETD is only 410 * using 16 bits for an error code, since that's 411 * all the space we have in the sks field. 412 */ 413 ctl_set_internal_failure(&io->scsiio, 414 /*sks_valid*/ 1, 415 /*retry_count*/ 416 io->io_hdr.port_status); 417 } 418 419 /* 420 * If this is a read, or a write with errors, it is done. 421 */ 422 if ((beio->bio_cmd == BIO_READ) 423 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0) 424 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) { 425 ctl_complete_beio(beio); 426 return (0); 427 } 428 429 /* 430 * At this point, we have a write and the DMA completed 431 * successfully. We now have to queue it to the task queue to 432 * execute the backend I/O. That is because we do blocking 433 * memory allocations, and in the file backing case, blocking I/O. 434 * This move done routine is generally called in the SIM's 435 * interrupt context, and therefore we cannot block. 436 */ 437 mtx_lock(&be_lun->queue_lock); 438 /* 439 * XXX KDM make sure that links is okay to use at this point. 440 * Otherwise, we either need to add another field to ctl_io_hdr, 441 * or deal with resource allocation here. 442 */ 443 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links); 444 mtx_unlock(&be_lun->queue_lock); 445 446 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 447 448 return (0); 449 } 450 451 static void 452 ctl_be_block_biodone(struct bio *bio) 453 { 454 struct ctl_be_block_io *beio; 455 struct ctl_be_block_lun *be_lun; 456 union ctl_io *io; 457 int error; 458 459 beio = bio->bio_caller1; 460 be_lun = beio->lun; 461 io = beio->io; 462 463 DPRINTF("entered\n"); 464 465 error = bio->bio_error; 466 mtx_lock(&be_lun->io_lock); 467 if (error != 0) 468 beio->num_errors++; 469 470 beio->num_bios_done++; 471 472 /* 473 * XXX KDM will this cause WITNESS to complain? Holding a lock 474 * during the free might cause it to complain. 475 */ 476 g_destroy_bio(bio); 477 478 /* 479 * If the send complete bit isn't set, or we aren't the last I/O to 480 * complete, then we're done. 481 */ 482 if ((beio->send_complete == 0) 483 || (beio->num_bios_done < beio->num_bios_sent)) { 484 mtx_unlock(&be_lun->io_lock); 485 return; 486 } 487 488 /* 489 * At this point, we've verified that we are the last I/O to 490 * complete, so it's safe to drop the lock. 491 */ 492 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 493 beio->ds_tag_type, beio->ds_trans_type, 494 /*now*/ NULL, /*then*/&beio->ds_t0); 495 mtx_unlock(&be_lun->io_lock); 496 497 /* 498 * If there are any errors from the backing device, we fail the 499 * entire I/O with a medium error. 500 */ 501 if (beio->num_errors > 0) { 502 if (error == EOPNOTSUPP) { 503 ctl_set_invalid_opcode(&io->scsiio); 504 } else if (error == ENOSPC) { 505 ctl_set_space_alloc_fail(&io->scsiio); 506 } else if (beio->bio_cmd == BIO_FLUSH) { 507 /* XXX KDM is there is a better error here? */ 508 ctl_set_internal_failure(&io->scsiio, 509 /*sks_valid*/ 1, 510 /*retry_count*/ 0xbad2); 511 } else 512 ctl_set_medium_error(&io->scsiio); 513 ctl_complete_beio(beio); 514 return; 515 } 516 517 /* 518 * If this is a write, a flush, a delete or verify, we're all done. 519 * If this is a read, we can now send the data to the user. 520 */ 521 if ((beio->bio_cmd == BIO_WRITE) 522 || (beio->bio_cmd == BIO_FLUSH) 523 || (beio->bio_cmd == BIO_DELETE) 524 || (ARGS(io)->flags & CTL_LLF_VERIFY)) { 525 ctl_set_success(&io->scsiio); 526 ctl_complete_beio(beio); 527 } else { 528 #ifdef CTL_TIME_IO 529 getbintime(&io->io_hdr.dma_start_bt); 530 #endif 531 ctl_datamove(io); 532 } 533 } 534 535 static void 536 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun, 537 struct ctl_be_block_io *beio) 538 { 539 union ctl_io *io = beio->io; 540 struct mount *mountpoint; 541 int error, lock_flags; 542 543 DPRINTF("entered\n"); 544 545 binuptime(&beio->ds_t0); 546 mtx_lock(&be_lun->io_lock); 547 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 548 mtx_unlock(&be_lun->io_lock); 549 550 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 551 552 if (MNT_SHARED_WRITES(mountpoint) 553 || ((mountpoint == NULL) 554 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 555 lock_flags = LK_SHARED; 556 else 557 lock_flags = LK_EXCLUSIVE; 558 559 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 560 561 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread); 562 VOP_UNLOCK(be_lun->vn, 0); 563 564 vn_finished_write(mountpoint); 565 566 mtx_lock(&be_lun->io_lock); 567 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 568 beio->ds_tag_type, beio->ds_trans_type, 569 /*now*/ NULL, /*then*/&beio->ds_t0); 570 mtx_unlock(&be_lun->io_lock); 571 572 if (error == 0) 573 ctl_set_success(&io->scsiio); 574 else { 575 /* XXX KDM is there is a better error here? */ 576 ctl_set_internal_failure(&io->scsiio, 577 /*sks_valid*/ 1, 578 /*retry_count*/ 0xbad1); 579 } 580 581 ctl_complete_beio(beio); 582 } 583 584 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t"); 585 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t"); 586 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t"); 587 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t"); 588 589 static void 590 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun, 591 struct ctl_be_block_io *beio) 592 { 593 struct ctl_be_block_filedata *file_data; 594 union ctl_io *io; 595 struct uio xuio; 596 struct iovec *xiovec; 597 int flags; 598 int error, i; 599 600 DPRINTF("entered\n"); 601 602 file_data = &be_lun->backend.file; 603 io = beio->io; 604 flags = 0; 605 if (ARGS(io)->flags & CTL_LLF_DPO) 606 flags |= IO_DIRECT; 607 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 608 flags |= IO_SYNC; 609 610 bzero(&xuio, sizeof(xuio)); 611 if (beio->bio_cmd == BIO_READ) { 612 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 613 xuio.uio_rw = UIO_READ; 614 } else { 615 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 616 xuio.uio_rw = UIO_WRITE; 617 } 618 xuio.uio_offset = beio->io_offset; 619 xuio.uio_resid = beio->io_len; 620 xuio.uio_segflg = UIO_SYSSPACE; 621 xuio.uio_iov = beio->xiovecs; 622 xuio.uio_iovcnt = beio->num_segs; 623 xuio.uio_td = curthread; 624 625 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 626 xiovec->iov_base = beio->sg_segs[i].addr; 627 xiovec->iov_len = beio->sg_segs[i].len; 628 } 629 630 binuptime(&beio->ds_t0); 631 mtx_lock(&be_lun->io_lock); 632 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 633 mtx_unlock(&be_lun->io_lock); 634 635 if (beio->bio_cmd == BIO_READ) { 636 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 637 638 /* 639 * UFS pays attention to IO_DIRECT for reads. If the 640 * DIRECTIO option is configured into the kernel, it calls 641 * ffs_rawread(). But that only works for single-segment 642 * uios with user space addresses. In our case, with a 643 * kernel uio, it still reads into the buffer cache, but it 644 * will just try to release the buffer from the cache later 645 * on in ffs_read(). 646 * 647 * ZFS does not pay attention to IO_DIRECT for reads. 648 * 649 * UFS does not pay attention to IO_SYNC for reads. 650 * 651 * ZFS pays attention to IO_SYNC (which translates into the 652 * Solaris define FRSYNC for zfs_read()) for reads. It 653 * attempts to sync the file before reading. 654 * 655 * So, to attempt to provide some barrier semantics in the 656 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. 657 */ 658 error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred); 659 660 VOP_UNLOCK(be_lun->vn, 0); 661 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 662 } else { 663 struct mount *mountpoint; 664 int lock_flags; 665 666 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT); 667 668 if (MNT_SHARED_WRITES(mountpoint) 669 || ((mountpoint == NULL) 670 && MNT_SHARED_WRITES(be_lun->vn->v_mount))) 671 lock_flags = LK_SHARED; 672 else 673 lock_flags = LK_EXCLUSIVE; 674 675 vn_lock(be_lun->vn, lock_flags | LK_RETRY); 676 677 /* 678 * UFS pays attention to IO_DIRECT for writes. The write 679 * is done asynchronously. (Normally the write would just 680 * get put into cache. 681 * 682 * UFS pays attention to IO_SYNC for writes. It will 683 * attempt to write the buffer out synchronously if that 684 * flag is set. 685 * 686 * ZFS does not pay attention to IO_DIRECT for writes. 687 * 688 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) 689 * for writes. It will flush the transaction from the 690 * cache before returning. 691 * 692 * So if we've got the BIO_ORDERED flag set, we want 693 * IO_SYNC in either the UFS or ZFS case. 694 */ 695 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred); 696 VOP_UNLOCK(be_lun->vn, 0); 697 698 vn_finished_write(mountpoint); 699 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 700 } 701 702 mtx_lock(&be_lun->io_lock); 703 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 704 beio->ds_tag_type, beio->ds_trans_type, 705 /*now*/ NULL, /*then*/&beio->ds_t0); 706 mtx_unlock(&be_lun->io_lock); 707 708 /* 709 * If we got an error, set the sense data to "MEDIUM ERROR" and 710 * return the I/O to the user. 711 */ 712 if (error != 0) { 713 char path_str[32]; 714 715 ctl_scsi_path_string(io, path_str, sizeof(path_str)); 716 printf("%s%s command returned errno %d\n", path_str, 717 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error); 718 if (error == ENOSPC) { 719 ctl_set_space_alloc_fail(&io->scsiio); 720 } else 721 ctl_set_medium_error(&io->scsiio); 722 ctl_complete_beio(beio); 723 return; 724 } 725 726 /* 727 * If this is a write or a verify, we're all done. 728 * If this is a read, we can now send the data to the user. 729 */ 730 if ((beio->bio_cmd == BIO_WRITE) || 731 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 732 ctl_set_success(&io->scsiio); 733 ctl_complete_beio(beio); 734 } else { 735 #ifdef CTL_TIME_IO 736 getbintime(&io->io_hdr.dma_start_bt); 737 #endif 738 ctl_datamove(io); 739 } 740 } 741 742 static void 743 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun, 744 struct ctl_be_block_io *beio) 745 { 746 struct ctl_be_block_devdata *dev_data; 747 union ctl_io *io; 748 struct uio xuio; 749 struct iovec *xiovec; 750 int flags; 751 int error, i; 752 753 DPRINTF("entered\n"); 754 755 dev_data = &be_lun->backend.dev; 756 io = beio->io; 757 flags = 0; 758 if (ARGS(io)->flags & CTL_LLF_DPO) 759 flags |= IO_DIRECT; 760 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA) 761 flags |= IO_SYNC; 762 763 bzero(&xuio, sizeof(xuio)); 764 if (beio->bio_cmd == BIO_READ) { 765 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0); 766 xuio.uio_rw = UIO_READ; 767 } else { 768 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0); 769 xuio.uio_rw = UIO_WRITE; 770 } 771 xuio.uio_offset = beio->io_offset; 772 xuio.uio_resid = beio->io_len; 773 xuio.uio_segflg = UIO_SYSSPACE; 774 xuio.uio_iov = beio->xiovecs; 775 xuio.uio_iovcnt = beio->num_segs; 776 xuio.uio_td = curthread; 777 778 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) { 779 xiovec->iov_base = beio->sg_segs[i].addr; 780 xiovec->iov_len = beio->sg_segs[i].len; 781 } 782 783 binuptime(&beio->ds_t0); 784 mtx_lock(&be_lun->io_lock); 785 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0); 786 mtx_unlock(&be_lun->io_lock); 787 788 if (beio->bio_cmd == BIO_READ) { 789 error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags); 790 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0); 791 } else { 792 error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags); 793 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0); 794 } 795 796 mtx_lock(&be_lun->io_lock); 797 devstat_end_transaction(beio->lun->disk_stats, beio->io_len, 798 beio->ds_tag_type, beio->ds_trans_type, 799 /*now*/ NULL, /*then*/&beio->ds_t0); 800 mtx_unlock(&be_lun->io_lock); 801 802 /* 803 * If we got an error, set the sense data to "MEDIUM ERROR" and 804 * return the I/O to the user. 805 */ 806 if (error != 0) { 807 if (error == ENOSPC) { 808 ctl_set_space_alloc_fail(&io->scsiio); 809 } else 810 ctl_set_medium_error(&io->scsiio); 811 ctl_complete_beio(beio); 812 return; 813 } 814 815 /* 816 * If this is a write or a verify, we're all done. 817 * If this is a read, we can now send the data to the user. 818 */ 819 if ((beio->bio_cmd == BIO_WRITE) || 820 (ARGS(io)->flags & CTL_LLF_VERIFY)) { 821 ctl_set_success(&io->scsiio); 822 ctl_complete_beio(beio); 823 } else { 824 #ifdef CTL_TIME_IO 825 getbintime(&io->io_hdr.dma_start_bt); 826 #endif 827 ctl_datamove(io); 828 } 829 } 830 831 static void 832 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun, 833 struct ctl_be_block_io *beio) 834 { 835 struct bio *bio; 836 union ctl_io *io; 837 struct ctl_be_block_devdata *dev_data; 838 839 dev_data = &be_lun->backend.dev; 840 io = beio->io; 841 842 DPRINTF("entered\n"); 843 844 /* This can't fail, it's a blocking allocation. */ 845 bio = g_alloc_bio(); 846 847 bio->bio_cmd = BIO_FLUSH; 848 bio->bio_flags |= BIO_ORDERED; 849 bio->bio_dev = dev_data->cdev; 850 bio->bio_offset = 0; 851 bio->bio_data = 0; 852 bio->bio_done = ctl_be_block_biodone; 853 bio->bio_caller1 = beio; 854 bio->bio_pblkno = 0; 855 856 /* 857 * We don't need to acquire the LUN lock here, because we are only 858 * sending one bio, and so there is no other context to synchronize 859 * with. 860 */ 861 beio->num_bios_sent = 1; 862 beio->send_complete = 1; 863 864 binuptime(&beio->ds_t0); 865 mtx_lock(&be_lun->io_lock); 866 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 867 mtx_unlock(&be_lun->io_lock); 868 869 (*dev_data->csw->d_strategy)(bio); 870 } 871 872 static void 873 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun, 874 struct ctl_be_block_io *beio, 875 uint64_t off, uint64_t len, int last) 876 { 877 struct bio *bio; 878 struct ctl_be_block_devdata *dev_data; 879 uint64_t maxlen; 880 881 dev_data = &be_lun->backend.dev; 882 maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize); 883 while (len > 0) { 884 bio = g_alloc_bio(); 885 bio->bio_cmd = BIO_DELETE; 886 bio->bio_dev = dev_data->cdev; 887 bio->bio_offset = off; 888 bio->bio_length = MIN(len, maxlen); 889 bio->bio_data = 0; 890 bio->bio_done = ctl_be_block_biodone; 891 bio->bio_caller1 = beio; 892 bio->bio_pblkno = off / be_lun->blocksize; 893 894 off += bio->bio_length; 895 len -= bio->bio_length; 896 897 mtx_lock(&be_lun->io_lock); 898 beio->num_bios_sent++; 899 if (last && len == 0) 900 beio->send_complete = 1; 901 mtx_unlock(&be_lun->io_lock); 902 903 (*dev_data->csw->d_strategy)(bio); 904 } 905 } 906 907 static void 908 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun, 909 struct ctl_be_block_io *beio) 910 { 911 union ctl_io *io; 912 struct ctl_be_block_devdata *dev_data; 913 struct ctl_ptr_len_flags *ptrlen; 914 struct scsi_unmap_desc *buf, *end; 915 uint64_t len; 916 917 dev_data = &be_lun->backend.dev; 918 io = beio->io; 919 920 DPRINTF("entered\n"); 921 922 binuptime(&beio->ds_t0); 923 mtx_lock(&be_lun->io_lock); 924 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 925 mtx_unlock(&be_lun->io_lock); 926 927 if (beio->io_offset == -1) { 928 beio->io_len = 0; 929 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 930 buf = (struct scsi_unmap_desc *)ptrlen->ptr; 931 end = buf + ptrlen->len / sizeof(*buf); 932 for (; buf < end; buf++) { 933 len = (uint64_t)scsi_4btoul(buf->length) * 934 be_lun->blocksize; 935 beio->io_len += len; 936 ctl_be_block_unmap_dev_range(be_lun, beio, 937 scsi_8btou64(buf->lba) * be_lun->blocksize, len, 938 (end - buf < 2) ? TRUE : FALSE); 939 } 940 } else 941 ctl_be_block_unmap_dev_range(be_lun, beio, 942 beio->io_offset, beio->io_len, TRUE); 943 } 944 945 static void 946 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun, 947 struct ctl_be_block_io *beio) 948 { 949 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue); 950 int i; 951 struct bio *bio; 952 struct ctl_be_block_devdata *dev_data; 953 off_t cur_offset; 954 int max_iosize; 955 956 DPRINTF("entered\n"); 957 958 dev_data = &be_lun->backend.dev; 959 960 /* 961 * We have to limit our I/O size to the maximum supported by the 962 * backend device. Hopefully it is MAXPHYS. If the driver doesn't 963 * set it properly, use DFLTPHYS. 964 */ 965 max_iosize = dev_data->cdev->si_iosize_max; 966 if (max_iosize < PAGE_SIZE) 967 max_iosize = DFLTPHYS; 968 969 cur_offset = beio->io_offset; 970 for (i = 0; i < beio->num_segs; i++) { 971 size_t cur_size; 972 uint8_t *cur_ptr; 973 974 cur_size = beio->sg_segs[i].len; 975 cur_ptr = beio->sg_segs[i].addr; 976 977 while (cur_size > 0) { 978 /* This can't fail, it's a blocking allocation. */ 979 bio = g_alloc_bio(); 980 981 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n")); 982 983 bio->bio_cmd = beio->bio_cmd; 984 bio->bio_dev = dev_data->cdev; 985 bio->bio_caller1 = beio; 986 bio->bio_length = min(cur_size, max_iosize); 987 bio->bio_offset = cur_offset; 988 bio->bio_data = cur_ptr; 989 bio->bio_done = ctl_be_block_biodone; 990 bio->bio_pblkno = cur_offset / be_lun->blocksize; 991 992 cur_offset += bio->bio_length; 993 cur_ptr += bio->bio_length; 994 cur_size -= bio->bio_length; 995 996 TAILQ_INSERT_TAIL(&queue, bio, bio_queue); 997 beio->num_bios_sent++; 998 } 999 } 1000 binuptime(&beio->ds_t0); 1001 mtx_lock(&be_lun->io_lock); 1002 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0); 1003 beio->send_complete = 1; 1004 mtx_unlock(&be_lun->io_lock); 1005 1006 /* 1007 * Fire off all allocated requests! 1008 */ 1009 while ((bio = TAILQ_FIRST(&queue)) != NULL) { 1010 TAILQ_REMOVE(&queue, bio, bio_queue); 1011 (*dev_data->csw->d_strategy)(bio); 1012 } 1013 } 1014 1015 static void 1016 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio) 1017 { 1018 union ctl_io *io; 1019 1020 io = beio->io; 1021 ctl_free_beio(beio); 1022 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1023 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1024 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1025 ctl_config_write_done(io); 1026 return; 1027 } 1028 1029 ctl_be_block_config_write(io); 1030 } 1031 1032 static void 1033 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun, 1034 union ctl_io *io) 1035 { 1036 struct ctl_be_block_io *beio; 1037 struct ctl_be_block_softc *softc; 1038 struct ctl_lba_len_flags *lbalen; 1039 uint64_t len_left, lba; 1040 int i, seglen; 1041 uint8_t *buf, *end; 1042 1043 DPRINTF("entered\n"); 1044 1045 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1046 softc = be_lun->softc; 1047 lbalen = ARGS(beio->io); 1048 1049 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) || 1050 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) { 1051 ctl_free_beio(beio); 1052 ctl_set_invalid_field(&io->scsiio, 1053 /*sks_valid*/ 1, 1054 /*command*/ 1, 1055 /*field*/ 1, 1056 /*bit_valid*/ 0, 1057 /*bit*/ 0); 1058 ctl_config_write_done(io); 1059 return; 1060 } 1061 1062 switch (io->scsiio.tag_type) { 1063 case CTL_TAG_ORDERED: 1064 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1065 break; 1066 case CTL_TAG_HEAD_OF_QUEUE: 1067 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1068 break; 1069 case CTL_TAG_UNTAGGED: 1070 case CTL_TAG_SIMPLE: 1071 case CTL_TAG_ACA: 1072 default: 1073 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1074 break; 1075 } 1076 1077 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) { 1078 beio->io_offset = lbalen->lba * be_lun->blocksize; 1079 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize; 1080 beio->bio_cmd = BIO_DELETE; 1081 beio->ds_trans_type = DEVSTAT_FREE; 1082 1083 be_lun->unmap(be_lun, beio); 1084 return; 1085 } 1086 1087 beio->bio_cmd = BIO_WRITE; 1088 beio->ds_trans_type = DEVSTAT_WRITE; 1089 1090 DPRINTF("WRITE SAME at LBA %jx len %u\n", 1091 (uintmax_t)lbalen->lba, lbalen->len); 1092 1093 len_left = (uint64_t)lbalen->len * be_lun->blocksize; 1094 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) { 1095 1096 /* 1097 * Setup the S/G entry for this chunk. 1098 */ 1099 seglen = MIN(CTLBLK_MAX_SEG, len_left); 1100 seglen -= seglen % be_lun->blocksize; 1101 beio->sg_segs[i].len = seglen; 1102 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1103 1104 DPRINTF("segment %d addr %p len %zd\n", i, 1105 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1106 1107 beio->num_segs++; 1108 len_left -= seglen; 1109 1110 buf = beio->sg_segs[i].addr; 1111 end = buf + seglen; 1112 for (; buf < end; buf += be_lun->blocksize) { 1113 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize); 1114 if (lbalen->flags & SWS_LBDATA) 1115 scsi_ulto4b(lbalen->lba + lba, buf); 1116 lba++; 1117 } 1118 } 1119 1120 beio->io_offset = lbalen->lba * be_lun->blocksize; 1121 beio->io_len = lba * be_lun->blocksize; 1122 1123 /* We can not do all in one run. Correct and schedule rerun. */ 1124 if (len_left > 0) { 1125 lbalen->lba += lba; 1126 lbalen->len -= lba; 1127 beio->beio_cont = ctl_be_block_cw_done_ws; 1128 } 1129 1130 be_lun->dispatch(be_lun, beio); 1131 } 1132 1133 static void 1134 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun, 1135 union ctl_io *io) 1136 { 1137 struct ctl_be_block_io *beio; 1138 struct ctl_be_block_softc *softc; 1139 struct ctl_ptr_len_flags *ptrlen; 1140 1141 DPRINTF("entered\n"); 1142 1143 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1144 softc = be_lun->softc; 1145 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 1146 1147 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) { 1148 ctl_free_beio(beio); 1149 ctl_set_invalid_field(&io->scsiio, 1150 /*sks_valid*/ 0, 1151 /*command*/ 1, 1152 /*field*/ 0, 1153 /*bit_valid*/ 0, 1154 /*bit*/ 0); 1155 ctl_config_write_done(io); 1156 return; 1157 } 1158 1159 switch (io->scsiio.tag_type) { 1160 case CTL_TAG_ORDERED: 1161 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1162 break; 1163 case CTL_TAG_HEAD_OF_QUEUE: 1164 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1165 break; 1166 case CTL_TAG_UNTAGGED: 1167 case CTL_TAG_SIMPLE: 1168 case CTL_TAG_ACA: 1169 default: 1170 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1171 break; 1172 } 1173 1174 beio->io_len = 0; 1175 beio->io_offset = -1; 1176 1177 beio->bio_cmd = BIO_DELETE; 1178 beio->ds_trans_type = DEVSTAT_FREE; 1179 1180 DPRINTF("UNMAP\n"); 1181 1182 be_lun->unmap(be_lun, beio); 1183 } 1184 1185 static void 1186 ctl_be_block_cw_done(struct ctl_be_block_io *beio) 1187 { 1188 union ctl_io *io; 1189 1190 io = beio->io; 1191 ctl_free_beio(beio); 1192 ctl_config_write_done(io); 1193 } 1194 1195 static void 1196 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun, 1197 union ctl_io *io) 1198 { 1199 struct ctl_be_block_io *beio; 1200 struct ctl_be_block_softc *softc; 1201 1202 DPRINTF("entered\n"); 1203 1204 softc = be_lun->softc; 1205 beio = ctl_alloc_beio(softc); 1206 beio->io = io; 1207 beio->lun = be_lun; 1208 beio->beio_cont = ctl_be_block_cw_done; 1209 PRIV(io)->ptr = (void *)beio; 1210 1211 switch (io->scsiio.cdb[0]) { 1212 case SYNCHRONIZE_CACHE: 1213 case SYNCHRONIZE_CACHE_16: 1214 beio->bio_cmd = BIO_FLUSH; 1215 beio->ds_trans_type = DEVSTAT_NO_DATA; 1216 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1217 beio->io_len = 0; 1218 be_lun->lun_flush(be_lun, beio); 1219 break; 1220 case WRITE_SAME_10: 1221 case WRITE_SAME_16: 1222 ctl_be_block_cw_dispatch_ws(be_lun, io); 1223 break; 1224 case UNMAP: 1225 ctl_be_block_cw_dispatch_unmap(be_lun, io); 1226 break; 1227 default: 1228 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]); 1229 break; 1230 } 1231 } 1232 1233 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t"); 1234 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t"); 1235 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t"); 1236 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t"); 1237 1238 static void 1239 ctl_be_block_next(struct ctl_be_block_io *beio) 1240 { 1241 struct ctl_be_block_lun *be_lun; 1242 union ctl_io *io; 1243 1244 io = beio->io; 1245 be_lun = beio->lun; 1246 ctl_free_beio(beio); 1247 if ((io->io_hdr.flags & CTL_FLAG_ABORT) || 1248 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 1249 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 1250 ctl_data_submit_done(io); 1251 return; 1252 } 1253 1254 io->io_hdr.status &= ~CTL_STATUS_MASK; 1255 io->io_hdr.status |= CTL_STATUS_NONE; 1256 1257 mtx_lock(&be_lun->queue_lock); 1258 /* 1259 * XXX KDM make sure that links is okay to use at this point. 1260 * Otherwise, we either need to add another field to ctl_io_hdr, 1261 * or deal with resource allocation here. 1262 */ 1263 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1264 mtx_unlock(&be_lun->queue_lock); 1265 1266 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1267 } 1268 1269 static void 1270 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun, 1271 union ctl_io *io) 1272 { 1273 struct ctl_be_block_io *beio; 1274 struct ctl_be_block_softc *softc; 1275 struct ctl_lba_len_flags *lbalen; 1276 struct ctl_ptr_len_flags *bptrlen; 1277 uint64_t len_left, lbas; 1278 int i; 1279 1280 softc = be_lun->softc; 1281 1282 DPRINTF("entered\n"); 1283 1284 lbalen = ARGS(io); 1285 if (lbalen->flags & CTL_LLF_WRITE) { 1286 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0); 1287 } else { 1288 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0); 1289 } 1290 1291 beio = ctl_alloc_beio(softc); 1292 beio->io = io; 1293 beio->lun = be_lun; 1294 bptrlen = PRIV(io); 1295 bptrlen->ptr = (void *)beio; 1296 1297 switch (io->scsiio.tag_type) { 1298 case CTL_TAG_ORDERED: 1299 beio->ds_tag_type = DEVSTAT_TAG_ORDERED; 1300 break; 1301 case CTL_TAG_HEAD_OF_QUEUE: 1302 beio->ds_tag_type = DEVSTAT_TAG_HEAD; 1303 break; 1304 case CTL_TAG_UNTAGGED: 1305 case CTL_TAG_SIMPLE: 1306 case CTL_TAG_ACA: 1307 default: 1308 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE; 1309 break; 1310 } 1311 1312 if (lbalen->flags & CTL_LLF_WRITE) { 1313 beio->bio_cmd = BIO_WRITE; 1314 beio->ds_trans_type = DEVSTAT_WRITE; 1315 } else { 1316 beio->bio_cmd = BIO_READ; 1317 beio->ds_trans_type = DEVSTAT_READ; 1318 } 1319 1320 DPRINTF("%s at LBA %jx len %u @%ju\n", 1321 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", 1322 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len); 1323 if (lbalen->flags & CTL_LLF_COMPARE) 1324 lbas = CTLBLK_HALF_IO_SIZE; 1325 else 1326 lbas = CTLBLK_MAX_IO_SIZE; 1327 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize); 1328 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize; 1329 beio->io_len = lbas * be_lun->blocksize; 1330 bptrlen->len += lbas; 1331 1332 for (i = 0, len_left = beio->io_len; len_left > 0; i++) { 1333 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)", 1334 i, CTLBLK_MAX_SEGS)); 1335 1336 /* 1337 * Setup the S/G entry for this chunk. 1338 */ 1339 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left); 1340 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK); 1341 1342 DPRINTF("segment %d addr %p len %zd\n", i, 1343 beio->sg_segs[i].addr, beio->sg_segs[i].len); 1344 1345 /* Set up second segment for compare operation. */ 1346 if (lbalen->flags & CTL_LLF_COMPARE) { 1347 beio->sg_segs[i + CTLBLK_HALF_SEGS].len = 1348 beio->sg_segs[i].len; 1349 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = 1350 uma_zalloc(be_lun->lun_zone, M_WAITOK); 1351 } 1352 1353 beio->num_segs++; 1354 len_left -= beio->sg_segs[i].len; 1355 } 1356 if (bptrlen->len < lbalen->len) 1357 beio->beio_cont = ctl_be_block_next; 1358 io->scsiio.be_move_done = ctl_be_block_move_done; 1359 /* For compare we have separate S/G lists for read and datamove. */ 1360 if (lbalen->flags & CTL_LLF_COMPARE) 1361 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS]; 1362 else 1363 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs; 1364 io->scsiio.kern_data_len = beio->io_len; 1365 io->scsiio.kern_data_resid = 0; 1366 io->scsiio.kern_sg_entries = beio->num_segs; 1367 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 1368 1369 /* 1370 * For the read case, we need to read the data into our buffers and 1371 * then we can send it back to the user. For the write case, we 1372 * need to get the data from the user first. 1373 */ 1374 if (beio->bio_cmd == BIO_READ) { 1375 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0); 1376 be_lun->dispatch(be_lun, beio); 1377 } else { 1378 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0); 1379 #ifdef CTL_TIME_IO 1380 getbintime(&io->io_hdr.dma_start_bt); 1381 #endif 1382 ctl_datamove(io); 1383 } 1384 } 1385 1386 static void 1387 ctl_be_block_worker(void *context, int pending) 1388 { 1389 struct ctl_be_block_lun *be_lun; 1390 struct ctl_be_block_softc *softc; 1391 union ctl_io *io; 1392 1393 be_lun = (struct ctl_be_block_lun *)context; 1394 softc = be_lun->softc; 1395 1396 DPRINTF("entered\n"); 1397 1398 mtx_lock(&be_lun->queue_lock); 1399 for (;;) { 1400 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue); 1401 if (io != NULL) { 1402 struct ctl_be_block_io *beio; 1403 1404 DPRINTF("datamove queue\n"); 1405 1406 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr, 1407 ctl_io_hdr, links); 1408 1409 mtx_unlock(&be_lun->queue_lock); 1410 1411 beio = (struct ctl_be_block_io *)PRIV(io)->ptr; 1412 1413 be_lun->dispatch(be_lun, beio); 1414 1415 mtx_lock(&be_lun->queue_lock); 1416 continue; 1417 } 1418 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue); 1419 if (io != NULL) { 1420 1421 DPRINTF("config write queue\n"); 1422 1423 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr, 1424 ctl_io_hdr, links); 1425 1426 mtx_unlock(&be_lun->queue_lock); 1427 1428 ctl_be_block_cw_dispatch(be_lun, io); 1429 1430 mtx_lock(&be_lun->queue_lock); 1431 continue; 1432 } 1433 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue); 1434 if (io != NULL) { 1435 DPRINTF("input queue\n"); 1436 1437 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr, 1438 ctl_io_hdr, links); 1439 mtx_unlock(&be_lun->queue_lock); 1440 1441 /* 1442 * We must drop the lock, since this routine and 1443 * its children may sleep. 1444 */ 1445 ctl_be_block_dispatch(be_lun, io); 1446 1447 mtx_lock(&be_lun->queue_lock); 1448 continue; 1449 } 1450 1451 /* 1452 * If we get here, there is no work left in the queues, so 1453 * just break out and let the task queue go to sleep. 1454 */ 1455 break; 1456 } 1457 mtx_unlock(&be_lun->queue_lock); 1458 } 1459 1460 /* 1461 * Entry point from CTL to the backend for I/O. We queue everything to a 1462 * work thread, so this just puts the I/O on a queue and wakes up the 1463 * thread. 1464 */ 1465 static int 1466 ctl_be_block_submit(union ctl_io *io) 1467 { 1468 struct ctl_be_block_lun *be_lun; 1469 struct ctl_be_lun *ctl_be_lun; 1470 1471 DPRINTF("entered\n"); 1472 1473 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 1474 CTL_PRIV_BACKEND_LUN].ptr; 1475 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 1476 1477 /* 1478 * Make sure we only get SCSI I/O. 1479 */ 1480 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type " 1481 "%#x) encountered", io->io_hdr.io_type)); 1482 1483 PRIV(io)->len = 0; 1484 1485 mtx_lock(&be_lun->queue_lock); 1486 /* 1487 * XXX KDM make sure that links is okay to use at this point. 1488 * Otherwise, we either need to add another field to ctl_io_hdr, 1489 * or deal with resource allocation here. 1490 */ 1491 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links); 1492 mtx_unlock(&be_lun->queue_lock); 1493 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 1494 1495 return (CTL_RETVAL_COMPLETE); 1496 } 1497 1498 static int 1499 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 1500 int flag, struct thread *td) 1501 { 1502 struct ctl_be_block_softc *softc; 1503 int error; 1504 1505 softc = &backend_block_softc; 1506 1507 error = 0; 1508 1509 switch (cmd) { 1510 case CTL_LUN_REQ: { 1511 struct ctl_lun_req *lun_req; 1512 1513 lun_req = (struct ctl_lun_req *)addr; 1514 1515 switch (lun_req->reqtype) { 1516 case CTL_LUNREQ_CREATE: 1517 error = ctl_be_block_create(softc, lun_req); 1518 break; 1519 case CTL_LUNREQ_RM: 1520 error = ctl_be_block_rm(softc, lun_req); 1521 break; 1522 case CTL_LUNREQ_MODIFY: 1523 error = ctl_be_block_modify(softc, lun_req); 1524 break; 1525 default: 1526 lun_req->status = CTL_LUN_ERROR; 1527 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 1528 "invalid LUN request type %d", 1529 lun_req->reqtype); 1530 break; 1531 } 1532 break; 1533 } 1534 default: 1535 error = ENOTTY; 1536 break; 1537 } 1538 1539 return (error); 1540 } 1541 1542 static int 1543 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1544 { 1545 struct ctl_be_block_filedata *file_data; 1546 struct ctl_lun_create_params *params; 1547 struct vattr vattr; 1548 off_t pss; 1549 int error; 1550 1551 error = 0; 1552 file_data = &be_lun->backend.file; 1553 params = &be_lun->params; 1554 1555 be_lun->dev_type = CTL_BE_BLOCK_FILE; 1556 be_lun->dispatch = ctl_be_block_dispatch_file; 1557 be_lun->lun_flush = ctl_be_block_flush_file; 1558 1559 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 1560 if (error != 0) { 1561 snprintf(req->error_str, sizeof(req->error_str), 1562 "error calling VOP_GETATTR() for file %s", 1563 be_lun->dev_path); 1564 return (error); 1565 } 1566 1567 /* 1568 * Verify that we have the ability to upgrade to exclusive 1569 * access on this file so we can trap errors at open instead 1570 * of reporting them during first access. 1571 */ 1572 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) { 1573 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY); 1574 if (be_lun->vn->v_iflag & VI_DOOMED) { 1575 error = EBADF; 1576 snprintf(req->error_str, sizeof(req->error_str), 1577 "error locking file %s", be_lun->dev_path); 1578 return (error); 1579 } 1580 } 1581 1582 1583 file_data->cred = crhold(curthread->td_ucred); 1584 if (params->lun_size_bytes != 0) 1585 be_lun->size_bytes = params->lun_size_bytes; 1586 else 1587 be_lun->size_bytes = vattr.va_size; 1588 /* 1589 * We set the multi thread flag for file operations because all 1590 * filesystems (in theory) are capable of allowing multiple readers 1591 * of a file at once. So we want to get the maximum possible 1592 * concurrency. 1593 */ 1594 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD; 1595 1596 /* 1597 * For files we can use any logical block size. Prefer 512 bytes 1598 * for compatibility reasons. If file's vattr.va_blocksize 1599 * (preferred I/O block size) is bigger and multiple to chosen 1600 * logical block size -- report it as physical block size. 1601 */ 1602 if (params->blocksize_bytes != 0) 1603 be_lun->blocksize = params->blocksize_bytes; 1604 else 1605 be_lun->blocksize = 512; 1606 pss = vattr.va_blocksize / be_lun->blocksize; 1607 if ((pss > 0) && (pss * be_lun->blocksize == vattr.va_blocksize) && 1608 ((pss & (pss - 1)) == 0)) { 1609 be_lun->pblockexp = fls(pss) - 1; 1610 be_lun->pblockoff = 0; 1611 } 1612 1613 /* 1614 * Sanity check. The media size has to be at least one 1615 * sector long. 1616 */ 1617 if (be_lun->size_bytes < be_lun->blocksize) { 1618 error = EINVAL; 1619 snprintf(req->error_str, sizeof(req->error_str), 1620 "file %s size %ju < block size %u", be_lun->dev_path, 1621 (uintmax_t)be_lun->size_bytes, be_lun->blocksize); 1622 } 1623 return (error); 1624 } 1625 1626 static int 1627 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1628 { 1629 struct ctl_lun_create_params *params; 1630 struct vattr vattr; 1631 struct cdev *dev; 1632 struct cdevsw *devsw; 1633 int error; 1634 off_t ps, pss, po, pos; 1635 1636 params = &be_lun->params; 1637 1638 be_lun->dev_type = CTL_BE_BLOCK_DEV; 1639 be_lun->backend.dev.cdev = be_lun->vn->v_rdev; 1640 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev, 1641 &be_lun->backend.dev.dev_ref); 1642 if (be_lun->backend.dev.csw == NULL) 1643 panic("Unable to retrieve device switch"); 1644 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) 1645 be_lun->dispatch = ctl_be_block_dispatch_zvol; 1646 else 1647 be_lun->dispatch = ctl_be_block_dispatch_dev; 1648 be_lun->lun_flush = ctl_be_block_flush_dev; 1649 be_lun->unmap = ctl_be_block_unmap_dev; 1650 1651 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED); 1652 if (error) { 1653 snprintf(req->error_str, sizeof(req->error_str), 1654 "error getting vnode attributes for device %s", 1655 be_lun->dev_path); 1656 return (error); 1657 } 1658 1659 dev = be_lun->vn->v_rdev; 1660 devsw = dev->si_devsw; 1661 if (!devsw->d_ioctl) { 1662 snprintf(req->error_str, sizeof(req->error_str), 1663 "no d_ioctl for device %s!", 1664 be_lun->dev_path); 1665 return (ENODEV); 1666 } 1667 1668 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, 1669 (caddr_t)&be_lun->blocksize, FREAD, 1670 curthread); 1671 if (error) { 1672 snprintf(req->error_str, sizeof(req->error_str), 1673 "error %d returned for DIOCGSECTORSIZE ioctl " 1674 "on %s!", error, be_lun->dev_path); 1675 return (error); 1676 } 1677 1678 /* 1679 * If the user has asked for a blocksize that is greater than the 1680 * backing device's blocksize, we can do it only if the blocksize 1681 * the user is asking for is an even multiple of the underlying 1682 * device's blocksize. 1683 */ 1684 if ((params->blocksize_bytes != 0) 1685 && (params->blocksize_bytes > be_lun->blocksize)) { 1686 uint32_t bs_multiple, tmp_blocksize; 1687 1688 bs_multiple = params->blocksize_bytes / be_lun->blocksize; 1689 1690 tmp_blocksize = bs_multiple * be_lun->blocksize; 1691 1692 if (tmp_blocksize == params->blocksize_bytes) { 1693 be_lun->blocksize = params->blocksize_bytes; 1694 } else { 1695 snprintf(req->error_str, sizeof(req->error_str), 1696 "requested blocksize %u is not an even " 1697 "multiple of backing device blocksize %u", 1698 params->blocksize_bytes, 1699 be_lun->blocksize); 1700 return (EINVAL); 1701 1702 } 1703 } else if ((params->blocksize_bytes != 0) 1704 && (params->blocksize_bytes != be_lun->blocksize)) { 1705 snprintf(req->error_str, sizeof(req->error_str), 1706 "requested blocksize %u < backing device " 1707 "blocksize %u", params->blocksize_bytes, 1708 be_lun->blocksize); 1709 return (EINVAL); 1710 } 1711 1712 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, 1713 (caddr_t)&be_lun->size_bytes, FREAD, 1714 curthread); 1715 if (error) { 1716 snprintf(req->error_str, sizeof(req->error_str), 1717 "error %d returned for DIOCGMEDIASIZE " 1718 " ioctl on %s!", error, 1719 be_lun->dev_path); 1720 return (error); 1721 } 1722 1723 if (params->lun_size_bytes != 0) { 1724 if (params->lun_size_bytes > be_lun->size_bytes) { 1725 snprintf(req->error_str, sizeof(req->error_str), 1726 "requested LUN size %ju > backing device " 1727 "size %ju", 1728 (uintmax_t)params->lun_size_bytes, 1729 (uintmax_t)be_lun->size_bytes); 1730 return (EINVAL); 1731 } 1732 1733 be_lun->size_bytes = params->lun_size_bytes; 1734 } 1735 1736 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE, 1737 (caddr_t)&ps, FREAD, curthread); 1738 if (error) 1739 ps = po = 0; 1740 else { 1741 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET, 1742 (caddr_t)&po, FREAD, curthread); 1743 if (error) 1744 po = 0; 1745 } 1746 pss = ps / be_lun->blocksize; 1747 pos = po / be_lun->blocksize; 1748 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) && 1749 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) { 1750 be_lun->pblockexp = fls(pss) - 1; 1751 be_lun->pblockoff = (pss - pos) % pss; 1752 } 1753 1754 return (0); 1755 } 1756 1757 static int 1758 ctl_be_block_close(struct ctl_be_block_lun *be_lun) 1759 { 1760 DROP_GIANT(); 1761 if (be_lun->vn) { 1762 int flags = FREAD | FWRITE; 1763 1764 switch (be_lun->dev_type) { 1765 case CTL_BE_BLOCK_DEV: 1766 if (be_lun->backend.dev.csw) { 1767 dev_relthread(be_lun->backend.dev.cdev, 1768 be_lun->backend.dev.dev_ref); 1769 be_lun->backend.dev.csw = NULL; 1770 be_lun->backend.dev.cdev = NULL; 1771 } 1772 break; 1773 case CTL_BE_BLOCK_FILE: 1774 break; 1775 case CTL_BE_BLOCK_NONE: 1776 break; 1777 default: 1778 panic("Unexpected backend type."); 1779 break; 1780 } 1781 1782 (void)vn_close(be_lun->vn, flags, NOCRED, curthread); 1783 be_lun->vn = NULL; 1784 1785 switch (be_lun->dev_type) { 1786 case CTL_BE_BLOCK_DEV: 1787 break; 1788 case CTL_BE_BLOCK_FILE: 1789 if (be_lun->backend.file.cred != NULL) { 1790 crfree(be_lun->backend.file.cred); 1791 be_lun->backend.file.cred = NULL; 1792 } 1793 break; 1794 case CTL_BE_BLOCK_NONE: 1795 break; 1796 default: 1797 panic("Unexpected backend type."); 1798 break; 1799 } 1800 be_lun->dev_type = CTL_BE_BLOCK_NONE; 1801 } 1802 PICKUP_GIANT(); 1803 1804 return (0); 1805 } 1806 1807 static int 1808 ctl_be_block_open(struct ctl_be_block_softc *softc, 1809 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req) 1810 { 1811 struct nameidata nd; 1812 int flags; 1813 int error; 1814 1815 /* 1816 * XXX KDM allow a read-only option? 1817 */ 1818 flags = FREAD | FWRITE; 1819 error = 0; 1820 1821 if (rootvnode == NULL) { 1822 snprintf(req->error_str, sizeof(req->error_str), 1823 "Root filesystem is not mounted"); 1824 return (1); 1825 } 1826 1827 if (!curthread->td_proc->p_fd->fd_cdir) { 1828 curthread->td_proc->p_fd->fd_cdir = rootvnode; 1829 VREF(rootvnode); 1830 } 1831 if (!curthread->td_proc->p_fd->fd_rdir) { 1832 curthread->td_proc->p_fd->fd_rdir = rootvnode; 1833 VREF(rootvnode); 1834 } 1835 if (!curthread->td_proc->p_fd->fd_jdir) { 1836 curthread->td_proc->p_fd->fd_jdir = rootvnode; 1837 VREF(rootvnode); 1838 } 1839 1840 again: 1841 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread); 1842 error = vn_open(&nd, &flags, 0, NULL); 1843 if (error) { 1844 /* 1845 * This is the only reasonable guess we can make as far as 1846 * path if the user doesn't give us a fully qualified path. 1847 * If they want to specify a file, they need to specify the 1848 * full path. 1849 */ 1850 if (be_lun->dev_path[0] != '/') { 1851 char *dev_path = "/dev/"; 1852 char *dev_name; 1853 1854 /* Try adding device path at beginning of name */ 1855 dev_name = malloc(strlen(be_lun->dev_path) 1856 + strlen(dev_path) + 1, 1857 M_CTLBLK, M_WAITOK); 1858 if (dev_name) { 1859 sprintf(dev_name, "%s%s", dev_path, 1860 be_lun->dev_path); 1861 free(be_lun->dev_path, M_CTLBLK); 1862 be_lun->dev_path = dev_name; 1863 goto again; 1864 } 1865 } 1866 snprintf(req->error_str, sizeof(req->error_str), 1867 "error opening %s: %d", be_lun->dev_path, error); 1868 return (error); 1869 } 1870 1871 NDFREE(&nd, NDF_ONLY_PNBUF); 1872 1873 be_lun->vn = nd.ni_vp; 1874 1875 /* We only support disks and files. */ 1876 if (vn_isdisk(be_lun->vn, &error)) { 1877 error = ctl_be_block_open_dev(be_lun, req); 1878 } else if (be_lun->vn->v_type == VREG) { 1879 error = ctl_be_block_open_file(be_lun, req); 1880 } else { 1881 error = EINVAL; 1882 snprintf(req->error_str, sizeof(req->error_str), 1883 "%s is not a disk or plain file", be_lun->dev_path); 1884 } 1885 VOP_UNLOCK(be_lun->vn, 0); 1886 1887 if (error != 0) { 1888 ctl_be_block_close(be_lun); 1889 return (error); 1890 } 1891 1892 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1893 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift; 1894 1895 return (0); 1896 } 1897 1898 static int 1899 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 1900 { 1901 struct ctl_be_block_lun *be_lun; 1902 struct ctl_lun_create_params *params; 1903 char num_thread_str[16]; 1904 char tmpstr[32]; 1905 char *value; 1906 int retval, num_threads, unmap; 1907 int tmp_num_threads; 1908 1909 params = &req->reqdata.create; 1910 retval = 0; 1911 req->status = CTL_LUN_OK; 1912 1913 num_threads = cbb_num_threads; 1914 1915 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK); 1916 1917 be_lun->params = req->reqdata.create; 1918 be_lun->softc = softc; 1919 STAILQ_INIT(&be_lun->input_queue); 1920 STAILQ_INIT(&be_lun->config_write_queue); 1921 STAILQ_INIT(&be_lun->datamove_queue); 1922 sprintf(be_lun->lunname, "cblk%d", softc->num_luns); 1923 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF); 1924 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF); 1925 ctl_init_opts(&be_lun->ctl_be_lun.options, 1926 req->num_be_args, req->kern_be_args); 1927 1928 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG, 1929 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0); 1930 1931 if (be_lun->lun_zone == NULL) { 1932 snprintf(req->error_str, sizeof(req->error_str), 1933 "error allocating UMA zone"); 1934 goto bailout_error; 1935 } 1936 1937 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 1938 be_lun->ctl_be_lun.lun_type = params->device_type; 1939 else 1940 be_lun->ctl_be_lun.lun_type = T_DIRECT; 1941 1942 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 1943 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file"); 1944 if (value == NULL) { 1945 snprintf(req->error_str, sizeof(req->error_str), 1946 "no file argument specified"); 1947 goto bailout_error; 1948 } 1949 be_lun->dev_path = strdup(value, M_CTLBLK); 1950 be_lun->blocksize = 512; 1951 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1; 1952 1953 retval = ctl_be_block_open(softc, be_lun, req); 1954 if (retval != 0) { 1955 retval = 0; 1956 req->status = CTL_LUN_WARNING; 1957 } 1958 } else { 1959 /* 1960 * For processor devices, we don't have any size. 1961 */ 1962 be_lun->blocksize = 0; 1963 be_lun->pblockexp = 0; 1964 be_lun->pblockoff = 0; 1965 be_lun->size_blocks = 0; 1966 be_lun->size_bytes = 0; 1967 be_lun->ctl_be_lun.maxlba = 0; 1968 1969 /* 1970 * Default to just 1 thread for processor devices. 1971 */ 1972 num_threads = 1; 1973 } 1974 1975 /* 1976 * XXX This searching loop might be refactored to be combined with 1977 * the loop above, 1978 */ 1979 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads"); 1980 if (value != NULL) { 1981 tmp_num_threads = strtol(value, NULL, 0); 1982 1983 /* 1984 * We don't let the user specify less than one 1985 * thread, but hope he's clueful enough not to 1986 * specify 1000 threads. 1987 */ 1988 if (tmp_num_threads < 1) { 1989 snprintf(req->error_str, sizeof(req->error_str), 1990 "invalid number of threads %s", 1991 num_thread_str); 1992 goto bailout_error; 1993 } 1994 num_threads = tmp_num_threads; 1995 } 1996 unmap = 0; 1997 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap"); 1998 if (value != NULL && strcmp(value, "on") == 0) 1999 unmap = 1; 2000 2001 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED; 2002 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 2003 if (be_lun->vn == NULL) 2004 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_OFFLINE; 2005 if (unmap) 2006 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP; 2007 be_lun->ctl_be_lun.be_lun = be_lun; 2008 be_lun->ctl_be_lun.maxlba = (be_lun->size_blocks == 0) ? 2009 0 : (be_lun->size_blocks - 1); 2010 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 2011 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 2012 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 2013 if (be_lun->dispatch == ctl_be_block_dispatch_zvol && 2014 be_lun->blocksize != 0) 2015 be_lun->ctl_be_lun.atomicblock = CTLBLK_MAX_IO_SIZE / 2016 be_lun->blocksize; 2017 /* Tell the user the blocksize we ended up using */ 2018 params->lun_size_bytes = be_lun->size_bytes; 2019 params->blocksize_bytes = be_lun->blocksize; 2020 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 2021 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 2022 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 2023 } else 2024 be_lun->ctl_be_lun.req_lun_id = 0; 2025 2026 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown; 2027 be_lun->ctl_be_lun.lun_config_status = 2028 ctl_be_block_lun_config_status; 2029 be_lun->ctl_be_lun.be = &ctl_be_block_driver; 2030 2031 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 2032 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 2033 softc->num_luns); 2034 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr, 2035 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2036 sizeof(tmpstr))); 2037 2038 /* Tell the user what we used for a serial number */ 2039 strncpy((char *)params->serial_num, tmpstr, 2040 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 2041 } else { 2042 strncpy((char *)be_lun->ctl_be_lun.serial_num, 2043 params->serial_num, 2044 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 2045 sizeof(params->serial_num))); 2046 } 2047 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 2048 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 2049 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 2050 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2051 sizeof(tmpstr))); 2052 2053 /* Tell the user what we used for a device ID */ 2054 strncpy((char *)params->device_id, tmpstr, 2055 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 2056 } else { 2057 strncpy((char *)be_lun->ctl_be_lun.device_id, 2058 params->device_id, 2059 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 2060 sizeof(params->device_id))); 2061 } 2062 2063 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun); 2064 2065 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK, 2066 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue); 2067 2068 if (be_lun->io_taskqueue == NULL) { 2069 snprintf(req->error_str, sizeof(req->error_str), 2070 "unable to create taskqueue"); 2071 goto bailout_error; 2072 } 2073 2074 /* 2075 * Note that we start the same number of threads by default for 2076 * both the file case and the block device case. For the file 2077 * case, we need multiple threads to allow concurrency, because the 2078 * vnode interface is designed to be a blocking interface. For the 2079 * block device case, ZFS zvols at least will block the caller's 2080 * context in many instances, and so we need multiple threads to 2081 * overcome that problem. Other block devices don't need as many 2082 * threads, but they shouldn't cause too many problems. 2083 * 2084 * If the user wants to just have a single thread for a block 2085 * device, he can specify that when the LUN is created, or change 2086 * the tunable/sysctl to alter the default number of threads. 2087 */ 2088 retval = taskqueue_start_threads(&be_lun->io_taskqueue, 2089 /*num threads*/num_threads, 2090 /*priority*/PWAIT, 2091 /*thread name*/ 2092 "%s taskq", be_lun->lunname); 2093 2094 if (retval != 0) 2095 goto bailout_error; 2096 2097 be_lun->num_threads = num_threads; 2098 2099 mtx_lock(&softc->lock); 2100 softc->num_luns++; 2101 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 2102 2103 mtx_unlock(&softc->lock); 2104 2105 retval = ctl_add_lun(&be_lun->ctl_be_lun); 2106 if (retval != 0) { 2107 mtx_lock(&softc->lock); 2108 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2109 links); 2110 softc->num_luns--; 2111 mtx_unlock(&softc->lock); 2112 snprintf(req->error_str, sizeof(req->error_str), 2113 "ctl_add_lun() returned error %d, see dmesg for " 2114 "details", retval); 2115 retval = 0; 2116 goto bailout_error; 2117 } 2118 2119 mtx_lock(&softc->lock); 2120 2121 /* 2122 * Tell the config_status routine that we're waiting so it won't 2123 * clean up the LUN in the event of an error. 2124 */ 2125 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2126 2127 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) { 2128 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2129 if (retval == EINTR) 2130 break; 2131 } 2132 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2133 2134 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) { 2135 snprintf(req->error_str, sizeof(req->error_str), 2136 "LUN configuration error, see dmesg for details"); 2137 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, 2138 links); 2139 softc->num_luns--; 2140 mtx_unlock(&softc->lock); 2141 goto bailout_error; 2142 } else { 2143 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 2144 } 2145 2146 mtx_unlock(&softc->lock); 2147 2148 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id, 2149 be_lun->blocksize, 2150 DEVSTAT_ALL_SUPPORTED, 2151 be_lun->ctl_be_lun.lun_type 2152 | DEVSTAT_TYPE_IF_OTHER, 2153 DEVSTAT_PRIORITY_OTHER); 2154 2155 return (retval); 2156 2157 bailout_error: 2158 req->status = CTL_LUN_ERROR; 2159 2160 if (be_lun->io_taskqueue != NULL) 2161 taskqueue_free(be_lun->io_taskqueue); 2162 ctl_be_block_close(be_lun); 2163 if (be_lun->dev_path != NULL) 2164 free(be_lun->dev_path, M_CTLBLK); 2165 if (be_lun->lun_zone != NULL) 2166 uma_zdestroy(be_lun->lun_zone); 2167 ctl_free_opts(&be_lun->ctl_be_lun.options); 2168 mtx_destroy(&be_lun->queue_lock); 2169 mtx_destroy(&be_lun->io_lock); 2170 free(be_lun, M_CTLBLK); 2171 2172 return (retval); 2173 } 2174 2175 static int 2176 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2177 { 2178 struct ctl_lun_rm_params *params; 2179 struct ctl_be_block_lun *be_lun; 2180 int retval; 2181 2182 params = &req->reqdata.rm; 2183 2184 mtx_lock(&softc->lock); 2185 2186 be_lun = NULL; 2187 2188 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2189 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2190 break; 2191 } 2192 mtx_unlock(&softc->lock); 2193 2194 if (be_lun == NULL) { 2195 snprintf(req->error_str, sizeof(req->error_str), 2196 "LUN %u is not managed by the block backend", 2197 params->lun_id); 2198 goto bailout_error; 2199 } 2200 2201 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 2202 2203 if (retval != 0) { 2204 snprintf(req->error_str, sizeof(req->error_str), 2205 "error %d returned from ctl_disable_lun() for " 2206 "LUN %d", retval, params->lun_id); 2207 goto bailout_error; 2208 2209 } 2210 2211 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 2212 if (retval != 0) { 2213 snprintf(req->error_str, sizeof(req->error_str), 2214 "error %d returned from ctl_invalidate_lun() for " 2215 "LUN %d", retval, params->lun_id); 2216 goto bailout_error; 2217 } 2218 2219 mtx_lock(&softc->lock); 2220 2221 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING; 2222 2223 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2224 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0); 2225 if (retval == EINTR) 2226 break; 2227 } 2228 2229 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING; 2230 2231 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) { 2232 snprintf(req->error_str, sizeof(req->error_str), 2233 "interrupted waiting for LUN to be freed"); 2234 mtx_unlock(&softc->lock); 2235 goto bailout_error; 2236 } 2237 2238 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links); 2239 2240 softc->num_luns--; 2241 mtx_unlock(&softc->lock); 2242 2243 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task); 2244 2245 taskqueue_free(be_lun->io_taskqueue); 2246 2247 ctl_be_block_close(be_lun); 2248 2249 if (be_lun->disk_stats != NULL) 2250 devstat_remove_entry(be_lun->disk_stats); 2251 2252 uma_zdestroy(be_lun->lun_zone); 2253 2254 ctl_free_opts(&be_lun->ctl_be_lun.options); 2255 free(be_lun->dev_path, M_CTLBLK); 2256 mtx_destroy(&be_lun->queue_lock); 2257 mtx_destroy(&be_lun->io_lock); 2258 free(be_lun, M_CTLBLK); 2259 2260 req->status = CTL_LUN_OK; 2261 2262 return (0); 2263 2264 bailout_error: 2265 2266 req->status = CTL_LUN_ERROR; 2267 2268 return (0); 2269 } 2270 2271 static int 2272 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun, 2273 struct ctl_lun_req *req) 2274 { 2275 struct vattr vattr; 2276 int error; 2277 struct ctl_lun_create_params *params = &be_lun->params; 2278 2279 if (params->lun_size_bytes != 0) { 2280 be_lun->size_bytes = params->lun_size_bytes; 2281 } else { 2282 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY); 2283 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred); 2284 VOP_UNLOCK(be_lun->vn, 0); 2285 if (error != 0) { 2286 snprintf(req->error_str, sizeof(req->error_str), 2287 "error calling VOP_GETATTR() for file %s", 2288 be_lun->dev_path); 2289 return (error); 2290 } 2291 2292 be_lun->size_bytes = vattr.va_size; 2293 } 2294 2295 return (0); 2296 } 2297 2298 static int 2299 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun, 2300 struct ctl_lun_req *req) 2301 { 2302 struct ctl_be_block_devdata *dev_data; 2303 int error; 2304 struct ctl_lun_create_params *params = &be_lun->params; 2305 uint64_t size_bytes; 2306 2307 dev_data = &be_lun->backend.dev; 2308 if (!dev_data->csw->d_ioctl) { 2309 snprintf(req->error_str, sizeof(req->error_str), 2310 "no d_ioctl for device %s!", be_lun->dev_path); 2311 return (ENODEV); 2312 } 2313 2314 error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE, 2315 (caddr_t)&size_bytes, FREAD, 2316 curthread); 2317 if (error) { 2318 snprintf(req->error_str, sizeof(req->error_str), 2319 "error %d returned for DIOCGMEDIASIZE ioctl " 2320 "on %s!", error, be_lun->dev_path); 2321 return (error); 2322 } 2323 2324 if (params->lun_size_bytes != 0) { 2325 if (params->lun_size_bytes > size_bytes) { 2326 snprintf(req->error_str, sizeof(req->error_str), 2327 "requested LUN size %ju > backing device " 2328 "size %ju", 2329 (uintmax_t)params->lun_size_bytes, 2330 (uintmax_t)size_bytes); 2331 return (EINVAL); 2332 } 2333 2334 be_lun->size_bytes = params->lun_size_bytes; 2335 } else { 2336 be_lun->size_bytes = size_bytes; 2337 } 2338 2339 return (0); 2340 } 2341 2342 static int 2343 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req) 2344 { 2345 struct ctl_lun_modify_params *params; 2346 struct ctl_be_block_lun *be_lun; 2347 uint64_t oldsize; 2348 int error; 2349 2350 params = &req->reqdata.modify; 2351 2352 mtx_lock(&softc->lock); 2353 be_lun = NULL; 2354 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 2355 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 2356 break; 2357 } 2358 mtx_unlock(&softc->lock); 2359 2360 if (be_lun == NULL) { 2361 snprintf(req->error_str, sizeof(req->error_str), 2362 "LUN %u is not managed by the block backend", 2363 params->lun_id); 2364 goto bailout_error; 2365 } 2366 2367 be_lun->params.lun_size_bytes = params->lun_size_bytes; 2368 2369 oldsize = be_lun->size_blocks; 2370 if (be_lun->vn == NULL) 2371 error = ctl_be_block_open(softc, be_lun, req); 2372 else if (be_lun->vn->v_type == VREG) 2373 error = ctl_be_block_modify_file(be_lun, req); 2374 else 2375 error = ctl_be_block_modify_dev(be_lun, req); 2376 2377 if (error == 0 && be_lun->size_blocks != oldsize) { 2378 be_lun->size_blocks = be_lun->size_bytes >> 2379 be_lun->blocksize_shift; 2380 2381 /* 2382 * The maximum LBA is the size - 1. 2383 * 2384 * XXX: Note that this field is being updated without locking, 2385 * which might cause problems on 32-bit architectures. 2386 */ 2387 be_lun->ctl_be_lun.maxlba = (be_lun->size_blocks == 0) ? 2388 0 : (be_lun->size_blocks - 1); 2389 be_lun->ctl_be_lun.blocksize = be_lun->blocksize; 2390 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp; 2391 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff; 2392 if (be_lun->dispatch == ctl_be_block_dispatch_zvol && 2393 be_lun->blocksize != 0) 2394 be_lun->ctl_be_lun.atomicblock = CTLBLK_MAX_IO_SIZE / 2395 be_lun->blocksize; 2396 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 2397 if (oldsize == 0 && be_lun->size_blocks != 0) 2398 ctl_lun_online(&be_lun->ctl_be_lun); 2399 } 2400 2401 /* Tell the user the exact size we ended up using */ 2402 params->lun_size_bytes = be_lun->size_bytes; 2403 2404 req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK; 2405 2406 return (0); 2407 2408 bailout_error: 2409 req->status = CTL_LUN_ERROR; 2410 2411 return (0); 2412 } 2413 2414 static void 2415 ctl_be_block_lun_shutdown(void *be_lun) 2416 { 2417 struct ctl_be_block_lun *lun; 2418 struct ctl_be_block_softc *softc; 2419 2420 lun = (struct ctl_be_block_lun *)be_lun; 2421 2422 softc = lun->softc; 2423 2424 mtx_lock(&softc->lock); 2425 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED; 2426 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2427 wakeup(lun); 2428 mtx_unlock(&softc->lock); 2429 2430 } 2431 2432 static void 2433 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status) 2434 { 2435 struct ctl_be_block_lun *lun; 2436 struct ctl_be_block_softc *softc; 2437 2438 lun = (struct ctl_be_block_lun *)be_lun; 2439 softc = lun->softc; 2440 2441 if (status == CTL_LUN_CONFIG_OK) { 2442 mtx_lock(&softc->lock); 2443 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2444 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING) 2445 wakeup(lun); 2446 mtx_unlock(&softc->lock); 2447 2448 /* 2449 * We successfully added the LUN, attempt to enable it. 2450 */ 2451 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 2452 printf("%s: ctl_enable_lun() failed!\n", __func__); 2453 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 2454 printf("%s: ctl_invalidate_lun() failed!\n", 2455 __func__); 2456 } 2457 } 2458 2459 return; 2460 } 2461 2462 2463 mtx_lock(&softc->lock); 2464 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED; 2465 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR; 2466 wakeup(lun); 2467 mtx_unlock(&softc->lock); 2468 } 2469 2470 2471 static int 2472 ctl_be_block_config_write(union ctl_io *io) 2473 { 2474 struct ctl_be_block_lun *be_lun; 2475 struct ctl_be_lun *ctl_be_lun; 2476 int retval; 2477 2478 retval = 0; 2479 2480 DPRINTF("entered\n"); 2481 2482 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 2483 CTL_PRIV_BACKEND_LUN].ptr; 2484 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun; 2485 2486 switch (io->scsiio.cdb[0]) { 2487 case SYNCHRONIZE_CACHE: 2488 case SYNCHRONIZE_CACHE_16: 2489 case WRITE_SAME_10: 2490 case WRITE_SAME_16: 2491 case UNMAP: 2492 /* 2493 * The upper level CTL code will filter out any CDBs with 2494 * the immediate bit set and return the proper error. 2495 * 2496 * We don't really need to worry about what LBA range the 2497 * user asked to be synced out. When they issue a sync 2498 * cache command, we'll sync out the whole thing. 2499 */ 2500 mtx_lock(&be_lun->queue_lock); 2501 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr, 2502 links); 2503 mtx_unlock(&be_lun->queue_lock); 2504 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task); 2505 break; 2506 case START_STOP_UNIT: { 2507 struct scsi_start_stop_unit *cdb; 2508 2509 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 2510 2511 if (cdb->how & SSS_START) 2512 retval = ctl_start_lun(ctl_be_lun); 2513 else { 2514 retval = ctl_stop_lun(ctl_be_lun); 2515 /* 2516 * XXX KDM Copan-specific offline behavior. 2517 * Figure out a reasonable way to port this? 2518 */ 2519 #ifdef NEEDTOPORT 2520 if ((retval == 0) 2521 && (cdb->byte2 & SSS_ONOFFLINE)) 2522 retval = ctl_lun_offline(ctl_be_lun); 2523 #endif 2524 } 2525 2526 /* 2527 * In general, the above routines should not fail. They 2528 * just set state for the LUN. So we've got something 2529 * pretty wrong here if we can't start or stop the LUN. 2530 */ 2531 if (retval != 0) { 2532 ctl_set_internal_failure(&io->scsiio, 2533 /*sks_valid*/ 1, 2534 /*retry_count*/ 0xf051); 2535 retval = CTL_RETVAL_COMPLETE; 2536 } else { 2537 ctl_set_success(&io->scsiio); 2538 } 2539 ctl_config_write_done(io); 2540 break; 2541 } 2542 default: 2543 ctl_set_invalid_opcode(&io->scsiio); 2544 ctl_config_write_done(io); 2545 retval = CTL_RETVAL_COMPLETE; 2546 break; 2547 } 2548 2549 return (retval); 2550 2551 } 2552 2553 static int 2554 ctl_be_block_config_read(union ctl_io *io) 2555 { 2556 return (0); 2557 } 2558 2559 static int 2560 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb) 2561 { 2562 struct ctl_be_block_lun *lun; 2563 int retval; 2564 2565 lun = (struct ctl_be_block_lun *)be_lun; 2566 retval = 0; 2567 2568 retval = sbuf_printf(sb, "\t<num_threads>"); 2569 2570 if (retval != 0) 2571 goto bailout; 2572 2573 retval = sbuf_printf(sb, "%d", lun->num_threads); 2574 2575 if (retval != 0) 2576 goto bailout; 2577 2578 retval = sbuf_printf(sb, "</num_threads>\n"); 2579 2580 bailout: 2581 2582 return (retval); 2583 } 2584 2585 int 2586 ctl_be_block_init(void) 2587 { 2588 struct ctl_be_block_softc *softc; 2589 int retval; 2590 2591 softc = &backend_block_softc; 2592 retval = 0; 2593 2594 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF); 2595 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io), 2596 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 2597 STAILQ_INIT(&softc->disk_list); 2598 STAILQ_INIT(&softc->lun_list); 2599 2600 return (retval); 2601 } 2602