/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

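/*
 * When a capacity limit is set, backing pages are reached through
 * be_lun->indir levels of directory pages, each holding PPP page pointers
 * (PPPS == log2(PPP)).  A leaf pointer is either a data page, P_UNMAPPED
 * or P_ANCHORED.  SGPP is the number of S/G entries that fit into one
 * page.  getpage_op_t tells ctl_backend_ramdisk_getpage() what to do when
 * the requested page has no backing memory.
 */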
typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try allocate if none. */
	GP_ANCHOR,	/* Return data page, try anchor if none. */
	GP_OTHER,	/* Return what present, do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_be_lun cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	int indir;
	uint8_t **pages;
	uint8_t *zero_page;
	struct sx page_lock;
	u_int pblocksize;
	u_int pblockmul;
	uint64_t size_bytes;
	uint64_t size_blocks;
	uint64_t cap_bytes;
	uint64_t cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	SLIST_ENTRY(ctl_be_ramdisk_lun) links;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct sx modify_lock;
	struct mtx lock;
	int num_luns;
	SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	sx_init(&softc->modify_lock, "ctlrammod");
	mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}

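/*
 * Look up the backing page for page number "pn" by walking the page
 * directory.  Depending on "op", a missing page is allocated (GP_WRITE),
 * anchored (GP_ANCHOR) or left as is.  A LUN without a capacity limit
 * (cap_bytes == 0) keeps no data at all: reads return the shared zero
 * page and writes are absorbed by a single scratch page.
 */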
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
};

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
};

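/*
 * Recursively free a page directory: "indir" levels of directory pages
 * and, at the bottom, the data pages themselves.
 */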
static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
};

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	free(io->scsiio.kern_data_ptr, M_RAMDISK);
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

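/*
 * Data movement for a request is done in chunks.  When CTL has finished
 * moving one chunk, either complete the command or queue the I/O to the
 * task queue so the next chunk is processed in the worker thread.
 */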
static int
ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_ramdisk_lun *be_lun =
	    (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	ctl_datamove(io);
}

static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
			ctl_serseq_done(io);
	}
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

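/*
 * Task queue worker: pick up the I/Os queued by the move-done callback
 * and continue their read/write or compare processing.
 */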
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

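/*
 * Release the blocks in the range [lba, lba + len): zero the partially
 * covered backing pages at both ends and unmap (or anchor) the fully
 * covered pages in between.
 */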
static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static uint64_t
ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

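/*
 * Remove a LUN: unlink it from the backend's list, ask CTL to remove it
 * and then wait until the shutdown callback marks it unconfigured before
 * freeing the per-LUN data.
 */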
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_ramdisk_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_remove_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_RAMDISK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

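	/*
	 * For disk and CD-ROM LUNs derive the logical block size, the
	 * number of blocks and the depth of the page directory from the
	 * requested size.  The optional "capacity" option limits how much
	 * memory may be used for backing pages; without it the LUN keeps
	 * no data at all.
	 */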
	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "soft") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "ctlram page");
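	/*
	 * Without a capacity limit this is a "black hole" LUN: a single
	 * scratch page absorbs all writes and reads return the zero page
	 * (see ctl_backend_ramdisk_getpage() above).
	 */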
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PUSER,
					 /*proc*/control_softc->ctl_proc,
					 /*thread name*/"ramdisk");
	if (retval != 0)
		goto bailout_error;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

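	/* Recalculate the size and notify CTL of the capacity change. */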
	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_be_ramdisk_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->zero_page, M_RAMDISK);
	ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
	sx_destroy(&be_lun->page_lock);
	mtx_destroy(&be_lun->queue_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_RAMDISK);
	mtx_unlock(&softc->lock);
}