/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try to allocate if none. */
	GP_ANCHOR,	/* Return data page, try to anchor if none. */
	GP_OTHER,	/* Return what is present; do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_lun_create_params params;
	char lunname[32];
	int indir;
	uint8_t **pages;
	uint8_t *zero_page;
	struct sx page_lock;
	u_int pblocksize;
	u_int pblockmul;
	uint64_t size_bytes;
	uint64_t size_blocks;
	uint64_t cap_bytes;
	uint64_t cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun cbe_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	return (0);
}

static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
}

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

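/*
 * Recursively free the page tree: with indir > 0 the entries of "p" are
 * pointers to lower-level directory pages, with indir == 0 "p" itself is
 * a data page.
 */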
static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
}

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;
	} else if (io->io_hdr.port_status != 0 &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
		    /*retry_count*/ io->io_hdr.port_status);
	} else if (io->scsiio.kern_data_resid != 0 &&
	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		ctl_set_invalid_field_ciu(&io->scsiio);
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		ctl_serseq_done(io);
	}
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
			    ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static uint64_t
ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = arg;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_disable_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or are about
	 * to wait, for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_invalidate_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain_all(be_lun->io_taskqueue);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->cbe_lun.options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = 0;
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = ctl_get_opt(&cbe_lun->options, "pblocksize");
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = ctl_get_opt(&cbe_lun->options, "capacity");
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = ctl_get_opt(&cbe_lun->options, "unmap");
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = ctl_get_opt(&cbe_lun->options, "readonly");
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = ctl_get_opt(&cbe_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
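
	/*
	 * The device ID is handled like the serial number above: use the
	 * user-supplied value if one was given, otherwise generate a
	 * default and report it back through the request parameters.
	 */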
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "cram page lock");
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN configuration error, see dmesg for details",
		    __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;
	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun = be_lun;
	struct ctl_be_ramdisk_softc *softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(be_lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				    __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
		    links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}