/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

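/*
 * Geometry of the page directory: PPP is the number of page pointers that
 * fit into one directory page, PPPS is log2(PPP), and SGPP is the number
 * of scatter/gather entries that fit into one page, which bounds how much
 * data each I/O chunk below may cover.
 */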
#define	PPP	(PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define	PPPS	(PAGE_SHIFT - 3)
#else
#define	PPPS	(PAGE_SHIFT - 2)
#endif
#define	SGPP	(PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define	P_UNMAPPED	NULL			/* Page is unmapped. */
#define	P_ANCHORED	((void *)(uintptr_t)1)	/* Page is anchored. */

typedef enum {
	GP_READ,	/* Return data page or zero page. */
	GP_WRITE,	/* Return data page, try allocate if none. */
	GP_ANCHOR,	/* Return data page, try anchor if none. */
	GP_OTHER,	/* Return what present, do not allocate/anchor. */
} getpage_op_t;

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_be_lun	cbe_lun;	/* Must be first element. */
	struct ctl_lun_create_params params;
	int			indir;
	uint8_t			**pages;
	uint8_t			*zero_page;
	struct sx		page_lock;
	u_int			pblocksize;
	u_int			pblockmul;
	uint64_t		size_bytes;
	uint64_t		size_blocks;
	uint64_t		cap_bytes;
	uint64_t		cap_used;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	SLIST_ENTRY(ctl_be_ramdisk_lun) links;
	struct taskqueue	*io_taskqueue;
	struct task		io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign	queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct sx modify_lock;
	struct mtx lock;
	int num_luns;
	SLIST_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun,
    const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
    caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.shutdown = ctl_backend_ramdisk_shutdown,
	.data_submit = ctl_backend_ramdisk_submit,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl,
	.lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ctlramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));
	sx_init(&softc->modify_lock, "ctlrammod");
	mtx_init(&softc->lock, "ctlram", NULL, MTX_DEF);
	SLIST_INIT(&softc->lun_list);
	return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun;

	mtx_lock(&softc->lock);
	while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
		SLIST_REMOVE_HEAD(&softc->lun_list, links);
		softc->num_luns--;
		/*
		 * Drop our lock here.  Since ctl_remove_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_remove_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);
	sx_destroy(&softc->modify_lock);
	return (0);
}

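/*
 * Look up the backing page for physical page number "pn".  Pages are kept
 * in a radix tree of "indir" levels rooted at be_lun->pages, with PPP
 * pointers per directory page; leaves are either a data page, P_UNMAPPED
 * or P_ANCHORED.  GP_WRITE and GP_ANCHOR allocate missing directory pages
 * and, capacity permitting, the leaf itself; GP_READ maps holes to the
 * shared zero page; GP_OTHER returns whatever is currently present.
 */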
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
	uint8_t **p, ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0) {
		switch (op) {
		case GP_READ:
			return (be_lun->zero_page);
		case GP_WRITE:
			return ((uint8_t *)be_lun->pages);
		case GP_ANCHOR:
			return (P_ANCHORED);
		default:
			return (P_UNMAPPED);
		}
	}
	if (op == GP_WRITE || op == GP_ANCHOR) {
		sx_xlock(&be_lun->page_lock);
		pp = &be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (*pp == NULL) {
				*pp = malloc(PAGE_SIZE, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			}
			i = pn >> s;
			pp = (uint8_t ***)&(*pp)[i];
			pn -= i << s;
		}
		if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
			if (op == GP_WRITE) {
				*pp = malloc(be_lun->pblocksize, M_RAMDISK,
				    M_WAITOK|M_ZERO);
			} else
				*pp = P_ANCHORED;
			be_lun->cap_used += be_lun->pblocksize;
		} else if (*pp == P_ANCHORED && op == GP_WRITE) {
			*pp = malloc(be_lun->pblocksize, M_RAMDISK,
			    M_WAITOK|M_ZERO);
		}
		sx_xunlock(&be_lun->page_lock);
		return ((uint8_t *)*pp);
	} else {
		sx_slock(&be_lun->page_lock);
		p = be_lun->pages;
		for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
			if (p == NULL)
				break;
			i = pn >> s;
			p = (uint8_t **)p[i];
			pn -= i << s;
		}
		sx_sunlock(&be_lun->page_lock);
		if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
			return (be_lun->zero_page);
		return ((uint8_t *)p);
	}
}

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_ANCHORED) {
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	} else if (*pp != P_UNMAPPED) {
		free(*pp, M_RAMDISK);
		be_lun->cap_used -= be_lun->pblocksize;
		*pp = P_UNMAPPED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
	uint8_t ***pp;
	off_t i;
	int s;

	if (be_lun->cap_bytes == 0)
		return;
	sx_xlock(&be_lun->page_lock);
	pp = &be_lun->pages;
	for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
		if (*pp == NULL)
			goto noindir;
		i = pn >> s;
		pp = (uint8_t ***)&(*pp)[i];
		pn -= i << s;
	}
	if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
		be_lun->cap_used += be_lun->pblocksize;
		*pp = P_ANCHORED;
	} else if (*pp != P_ANCHORED) {
		free(*pp, M_RAMDISK);
		*pp = P_ANCHORED;
	}
noindir:
	sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
	int i;

	if (p == NULL)
		return;
	if (indir == 0) {
		free(p, M_RAMDISK);
		return;
	}
	for (i = 0; i < PPP; i++) {
		if (p[i] == NULL)
			continue;
		ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
	}
	free(p, M_RAMDISK);
}

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		if (a[i] != b[i])
			break;
	}
	return (i);
}

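/*
 * Compare the data received for a COMPARE command with the backing store.
 * On a mismatch, report MISCOMPARE with the byte offset of the first
 * difference in the sense INFORMATION field.
 */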
static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint8_t info[8];
	uint64_t lba;
	u_int lbaoff, lbas, res, off;

	lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
	lba = ARGS(io)->lba + PRIV(io)->len - lbas;
	off = 0;
	for (; lbas > 0; lbas--, lba++) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_READ);
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		res = cmp(io->scsiio.kern_data_ptr + off, page,
		    cbe_lun->blocksize);
		off += res;
		if (res < cbe_lun->blocksize)
			break;
	}
	free(io->scsiio.kern_data_ptr, M_RAMDISK);
	if (lbas > 0) {
		off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
		scsi_u64to8b(off, info);
		ctl_set_sense(&io->scsiio, /*current_error*/ 1,
		    /*sense_key*/ SSD_KEY_MISCOMPARE,
		    /*asc*/ 0x1D, /*ascq*/ 0x00,
		    /*type*/ SSD_ELEM_INFO,
		    /*size*/ sizeof(info), /*data*/ &info,
		    /*type*/ SSD_ELEM_NONE);
		return (1);
	}
	return (0);
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io, bool samethr)
{
	struct ctl_be_ramdisk_lun *be_lun =
	    (struct ctl_be_ramdisk_lun *)CTL_BACKEND_LUN(io);

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0 &&
	    (io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE) {
		if (ARGS(io)->flags & CTL_LLF_COMPARE) {
			/* We have data block ready for comparison. */
			if (ctl_backend_ramdisk_cmp(io))
				goto done;
		}
		if (ARGS(io)->len > PRIV(io)->len) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	}
done:
	ctl_data_submit_done(io);
	return (0);
}

static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	u_int lbas, len;

	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
	len = lbas * cbe_lun->blocksize;

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
	io->scsiio.kern_data_len = len;
	io->scsiio.kern_sg_entries = 0;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	ctl_datamove(io);
}

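/*
 * Carve the next chunk of a READ/WRITE request into an S/G list pointing
 * directly at the backing pages (at most SGPP segments) and hand it to
 * ctl_datamove().  Requests larger than one chunk are continued from
 * ctl_backend_ramdisk_move_done() via the worker task.
 */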
static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_sg_entry *sg_entries;
	uint8_t *page;
	uint64_t lba;
	u_int i, len, lbaoff, lbas, sgs, off;
	getpage_op_t op;

	lba = ARGS(io)->lba + PRIV(io)->len;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	lbas = ARGS(io)->len - PRIV(io)->len;
	lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
	sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
	off = lbaoff * cbe_lun->blocksize;
	op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
	if (sgs > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
		    sgs, M_RAMDISK, M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		len = lbas * cbe_lun->blocksize;
		for (i = 0; i < sgs; i++) {
			page = ctl_backend_ramdisk_getpage(be_lun,
			    (lba >> cbe_lun->pblockexp) + i, op);
			if (page == P_UNMAPPED || page == P_ANCHORED) {
				free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
				ctl_set_space_alloc_fail(&io->scsiio);
				ctl_data_submit_done(io);
				return;
			}
			sg_entries[i].addr = page + off;
			sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
			len -= sg_entries[i].len;
			off = 0;
		}
	} else {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, op);
		if (page == P_UNMAPPED || page == P_ANCHORED)
			goto nospc;
		sgs = 0;
		io->scsiio.kern_data_ptr = page + off;
	}

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
	io->scsiio.kern_sg_entries = sgs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	PRIV(io)->len += lbas;
	if ((ARGS(io)->flags & CTL_LLF_READ) &&
	    ARGS(io)->len <= PRIV(io)->len) {
		ctl_set_success(&io->scsiio);
		if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
			ctl_serseq_done(io);
	}
	ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_lba_len_flags *lbalen = ARGS(io);

	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	PRIV(io)->len = 0;
	if (lbalen->flags & CTL_LLF_COMPARE)
		ctl_backend_ramdisk_compare(io);
	else
		ctl_backend_ramdisk_rw(io);
	return (CTL_RETVAL_COMPLETE);
}

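/*
 * Taskqueue worker: continue I/Os that ctl_backend_ramdisk_move_done()
 * queued because they still have data left to transfer.
 */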
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&be_lun->cont_queue, links);
			mtx_unlock(&be_lun->queue_lock);
			if (ARGS(io)->flags & CTL_LLF_COMPARE)
				ctl_backend_ramdisk_compare(io);
			else
				ctl_backend_ramdisk_rw(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct scsi_get_lba_status_data *data;
	uint8_t *page;
	u_int lbaoff;

	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
	scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
	lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
	scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
	page = ctl_backend_ramdisk_getpage(be_lun,
	    ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
	if (page == P_UNMAPPED)
		data->descr[0].status = 1;
	else if (page == P_ANCHORED)
		data->descr[0].status = 2;
	else
		data->descr[0].status = 0;
	ctl_config_read_done(io);
	return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			retval = ctl_backend_ramdisk_gls(io);
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 1,
		    /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}
	return (retval);
}

static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint8_t *page;
	uint64_t p, lp;
	u_int lbaoff;
	getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

	/* Partially zero first partial page. */
	p = lba >> cbe_lun->pblockexp;
	lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
	if (lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, p, op);
		if (page != P_UNMAPPED && page != P_ANCHORED) {
			memset(page + lbaoff * cbe_lun->blocksize, 0,
			    min(len, be_lun->pblockmul - lbaoff) *
			    cbe_lun->blocksize);
		}
		p++;
	}

	/* Partially zero last partial page. */
	lp = (lba + len) >> cbe_lun->pblockexp;
	lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
	if (p <= lp && lbaoff != 0) {
		page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
		if (page != P_UNMAPPED && page != P_ANCHORED)
			memset(page, 0, lbaoff * cbe_lun->blocksize);
	}

	/* Delete remaining full pages. */
	if (anchor) {
		for (; p < lp; p++)
			ctl_backend_ramdisk_anchorpage(be_lun, p);
	} else {
		for (; p < lp; p++)
			ctl_backend_ramdisk_unmappage(be_lun, p);
	}
}

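/*
 * WRITE SAME: with UNMAP the range is deallocated (or anchored) via
 * ctl_backend_ramdisk_delete(); otherwise every block is filled with the
 * supplied data, or with zeroes when NDOB is set, and with the LBA stamped
 * into the first four bytes when LBDATA is set.
 */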
static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_lba_len_flags *lbalen = ARGS(io);
	uint8_t *page;
	uint64_t lba;
	u_int lbaoff, lbas;

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ 1,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}
	if (lbalen->flags & SWS_UNMAP) {
		ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
		    (lbalen->flags & SWS_ANCHOR) != 0);
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		return;
	}

	for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
		page = ctl_backend_ramdisk_getpage(be_lun,
		    lba >> cbe_lun->pblockexp, GP_WRITE);
		if (page == P_UNMAPPED || page == P_ANCHORED) {
			ctl_set_space_alloc_fail(&io->scsiio);
			ctl_data_submit_done(io);
			return;
		}
		lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
		page += lbaoff * cbe_lun->blocksize;
		if (lbalen->flags & SWS_NDOB) {
			memset(page, 0, cbe_lun->blocksize);
		} else {
			memcpy(page, io->scsiio.kern_data_ptr,
			    cbe_lun->blocksize);
		}
		if (lbalen->flags & SWS_LBDATA)
			scsi_ulto4b(lba, page);
	}
	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
	struct scsi_unmap_desc *buf, *end;

	if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
		ctl_set_invalid_field(&io->scsiio,
		    /*sks_valid*/ 0,
		    /*command*/ 0,
		    /*field*/ 0,
		    /*bit_valid*/ 0,
		    /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (; buf < end; buf++) {
		ctl_backend_ramdisk_delete(cbe_lun,
		    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
		    (ptrlen->flags & SU_ANCHOR) != 0);
	}

	ctl_set_success(&io->scsiio);
	ctl_config_write_done(io);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/* We have no cache to flush. */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_backend_ramdisk_ws(io);
		break;
	case UNMAP:
		ctl_backend_ramdisk_unmap(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static uint64_t
ctl_backend_ramdisk_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	uint64_t val;

	val = UINT64_MAX;
	if (be_lun->cap_bytes == 0)
		return (val);
	sx_slock(&be_lun->page_lock);
	if (strcmp(attrname, "blocksused") == 0) {
		val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
	} else if (strcmp(attrname, "blocksavail") == 0) {
		val = (be_lun->cap_bytes - be_lun->cap_used) /
		    be_lun->cbe_lun.blocksize;
	}
	sx_sunlock(&be_lun->page_lock);
	return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
			    "%s: invalid LUN request type %d", __func__,
			    lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

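/*
 * Remove a LUN: detach it from the backend's list, ask CTL to remove it,
 * then sleep until ctl_backend_ramdisk_lun_shutdown() marks it
 * unconfigured (or the sleep is interrupted).
 */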
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id) {
			SLIST_REMOVE(&softc->lun_list, be_lun,
			    ctl_be_ramdisk_lun, links);
			softc->num_luns--;
			break;
		}
	}
	mtx_unlock(&softc->lock);
	sx_xunlock(&softc->modify_lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_remove_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: error %d returned from ctl_remove_lun() for "
		    "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlramrm", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		mtx_unlock(&softc->lock);
		free(be_lun, M_RAMDISK);
	} else {
		mtx_unlock(&softc->lock);
		return (EINTR);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	const char *value;
	char tmpstr[32];
	uint64_t t;
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->options = nvlist_clone(req->args_nvl);
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = 0;
	cbe_lun->flags = 0;
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	be_lun->pblocksize = PAGE_SIZE;
	value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
	if (value != NULL) {
		ctl_expand_number(value, &t);
		be_lun->pblocksize = t;
	}
	if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: unsupported pblocksize %u", __func__,
		    be_lun->pblocksize);
		goto bailout_error;
	}

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
		if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: pblocksize %u not exp2 of blocksize %u",
			    __func__,
			    be_lun->pblocksize, cbe_lun->blocksize);
			goto bailout_error;
		}
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
			    "%s: LUN size %ju < blocksize %u", __func__,
			    params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
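		/*
		 * Pick enough indirection levels for the page directory
		 * to cover every backing page of this LUN size.
		 */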
		be_lun->indir = 0;
		t = be_lun->size_bytes / be_lun->pblocksize;
		while (t > 1) {
			t /= PPP;
			be_lun->indir++;
		}
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
		cbe_lun->pblockoff = 0;
		cbe_lun->ublockexp = cbe_lun->pblockexp;
		cbe_lun->ublockoff = 0;
		cbe_lun->atomicblock = be_lun->pblocksize;
		cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
		value = dnvlist_get_string(cbe_lun->options, "capacity", NULL);
		if (value != NULL)
			ctl_expand_number(value, &be_lun->cap_bytes);
	} else {
		be_lun->pblockmul = 1;
		cbe_lun->pblockexp = 0;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
	if (value == NULL || strcmp(value, "off") != 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "soft") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
		    softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
		    MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
		    MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
		    MIN(sizeof(cbe_lun->serial_num),
		    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
		    MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
		    MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
		    MIN(sizeof(cbe_lun->device_id),
		    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	sx_init(&be_lun->page_lock, "ctlram page");
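	/*
	 * Without a capacity limit the LUN keeps no real backing store:
	 * writes land in a single scratch page and reads are served from
	 * the shared zero page (see ctl_backend_ramdisk_getpage()).
	 */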
	if (be_lun->cap_bytes == 0) {
		be_lun->indir = 0;
		be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
	}
	be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
	    M_WAITOK|M_ZERO);
	mtx_init(&be_lun->queue_lock, "ctlram queue", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
	    be_lun);

	be_lun->io_taskqueue = taskqueue_create("ctlramtq", M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
	    /*num threads*/1,
	    /*priority*/PUSER,
	    /*proc*/control_softc->ctl_proc,
	    /*thread name*/"ramdisk");
	if (retval != 0)
		goto bailout_error;

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: ctl_add_lun() returned error %d, see dmesg for "
		    "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	softc->num_luns++;
	SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	params->req_lun_id = cbe_lun->lun_id;

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL)
			taskqueue_free(be_lun->io_taskqueue);
		nvlist_destroy(cbe_lun->options);
		free(be_lun->zero_page, M_RAMDISK);
		ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
		sx_destroy(&be_lun->page_lock);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	const char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;
	sx_xlock(&softc->modify_lock);
	mtx_lock(&softc->lock);
	SLIST_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN %u is not managed by the ramdisk backend",
		    __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;

	if (req->args_nvl != NULL) {
		nvlist_destroy(cbe_lun->options);
		cbe_lun->options = nvlist_clone(req->args_nvl);
	}

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
		    "%s: LUN size %ju < blocksize %u", __func__,
		    be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	sx_xunlock(&softc->modify_lock);
	req->status = CTL_LUN_ERROR;
	return (0);
}

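/*
 * Shutdown callback registered in ctl_backend_ramdisk_create(): release
 * the LUN's resources and either wake up a waiting
 * ctl_backend_ramdisk_rm() or free the per-LUN structure here.
 */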
static void
ctl_backend_ramdisk_lun_shutdown(struct ctl_be_lun *cbe_lun)
{
	struct ctl_be_ramdisk_lun *be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun;
	struct ctl_be_ramdisk_softc *softc = be_lun->softc;

	taskqueue_drain_all(be_lun->io_taskqueue);
	taskqueue_free(be_lun->io_taskqueue);
	nvlist_destroy(be_lun->cbe_lun.options);
	free(be_lun->zero_page, M_RAMDISK);
	ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
	sx_destroy(&be_lun->page_lock);
	mtx_destroy(&be_lun->queue_lock);

	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (be_lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
		wakeup(be_lun);
	else
		free(be_lun, M_RAMDISK);
	mtx_unlock(&softc->lock);
}