/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */
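
/*
 * Backing pages are kept in a radix tree rooted at be_lun->pages with
 * be_lun->indir levels of indirection.  Each indirect level is a
 * malloc(9)'ed page holding PPP child pointers (PPPS == log2(PPP));
 * leaves are data pages of pblocksize bytes, or the P_UNMAPPED /
 * P_ANCHORED markers above.  The "capacity" option (cap_bytes) bounds
 * how much data may be allocated; cap_used tracks the current usage.
 */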

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try allocate if none. */
	GP_ANCHOR,	/* Return data page, try anchor if none. */
	GP_OTHER,	/* Return what present, do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_be_lun	cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	int			indir;
	uint8_t			**pages;
	uint8_t			*zero_page;
	struct sx		page_lock;
	u_int			pblocksize;
	u_int			pblockmul;
	uint64_t		size_bytes;
	uint64_t		size_blocks;
	uint64_t		cap_bytes;
	uint64_t		cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	SLIST_ENTRY(ctl_be_ramdisk_lun) links;
	struct taskqueue	*io_taskqueue;
	struct task		io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign	queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct sx		modify_lock;
	struct mtx		lock;
	int			num_luns;
	SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun,
    const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
    caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	sx_init(&softc->modify_lock, "ctlrammod");
	mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}
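
/*
 * Return the backing page for physical page number 'pn'.  GP_READ maps
 * holes to the shared zero page; GP_WRITE allocates a data page if the
 * capacity limit allows (and replaces an anchored marker with real
 * storage); GP_ANCHOR reserves capacity without allocating data;
 * GP_OTHER returns whatever is currently present.  P_UNMAPPED or
 * P_ANCHORED is returned when no data page exists or can be created.
 * With no capacity limit (cap_bytes == 0) there is no page tree at all:
 * reads see the zero page and every write lands in one scratch page.
 */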

static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
};

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};
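
/*
 * Free the whole page tree: recurse through 'indir' levels of indirect
 * pages, freeing each page on the way out.
 */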

static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
};

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	free(io->scsiio.kern_data_ptr, M_RAMDISK);
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}
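
/*
 * Data movement callback.  Large requests are processed in chunks: if
 * more LBAs remain (ARGS len > PRIV len), the I/O is queued on
 * cont_queue and continued by the taskqueue worker; otherwise it is
 * completed here.  For COMPARE commands the chunk just received is
 * checked against the medium first.
 */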

static int
ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_ramdisk_lun *be_lun =
	    (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	ctl_datamove(io);
}

static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
			ctl_serseq_done(io);
	}
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}
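
/*
 * Taskqueue worker: pull I/Os queued by ctl_backend_ramdisk_move_done()
 * off cont_queue and issue their next chunk.
 */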

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}
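
/*
 * Helper for UNMAP and WRITE SAME with UNMAP: zero the partially covered
 * first and last physical pages of the range and unmap (or anchor) every
 * fully covered page in between.
 */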

static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
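
/*
 * Report backend-specific LUN attributes.  "blocksused" and "blocksavail"
 * are derived from the capacity accounting; without a capacity limit
 * UINT64_MAX ("unknown") is returned.
 */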

static uint64_t
ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}
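
/*
 * Remove a LUN (CTL_LUNREQ_RM): unlink it from the list, mark it WAITING,
 * call ctl_remove_lun() and then sleep until
 * ctl_backend_ramdisk_lun_shutdown() marks the LUN UNCONFIGURED.
 */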

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_ramdisk_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_remove_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_RAMDISK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}
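
/*
 * Create a LUN (CTL_LUNREQ_CREATE): validate pblocksize, derive the block
 * size and geometry, parse the "capacity", "unmap", "readonly", "serseq"
 * and "ha_role" options, allocate the backing structures and the worker
 * taskqueue, and register the LUN with CTL.
 */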

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "soft") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "ctlram page");
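	/*
	 * Without a "capacity" option this is the black hole variant:
	 * no page tree is kept, a single scratch page absorbs all
	 * writes and reads are served from the zero page.
	 */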
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PUSER,
					 /*proc*/control_softc->ctl_proc,
					 /*thread name*/"ramdisk");
	if (retval != 0)
		goto bailout_error;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}
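
/*
 * Called by CTL once the LUN is fully gone: drain the worker, release all
 * backing pages, and either wake up a waiting ctl_backend_ramdisk_rm() or
 * free the LUN structure here.
 */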

static void
ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_be_ramdisk_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->zero_page, M_RAMDISK);
	ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
	sx_destroy(&be_lun->page_lock);
	mtx_destroy(&be_lun->queue_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_RAMDISK);
	mtx_unlock(&softc->lock);
}