/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
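 *
 * Note: this backend does not keep data per LBA.  Every read and write is
 * satisfied from one small shared buffer (or set of pages), so written data
 * is effectively discarded; it exists to exercise and benchmark the CTL
 * target paths without real storage behind them.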
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING = 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
        struct ctl_lun_create_params params;
        char lunname[32];
        uint64_t size_bytes;
        uint64_t size_blocks;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun cbe_lun;
        struct taskqueue *io_taskqueue;
        struct task io_task;
        STAILQ_HEAD(, ctl_io_hdr) cont_queue;
        struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int rd_size;
#ifdef CTL_RAMDISK_PAGES
        uint8_t **ramdisk_pages;
        int num_pages;
#else
        uint8_t *ramdisk_buffer;
#endif
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
                                     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                                      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                                      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                                  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        .name = "ramdisk",
        .flags = CTL_BE_FLAG_HAS_CONFIG,
        .init = ctl_backend_ramdisk_init,
        .data_submit = ctl_backend_ramdisk_submit,
        .data_move_done = ctl_backend_ramdisk_move_done,
        .config_read = ctl_backend_ramdisk_config_read,
        .config_write = ctl_backend_ramdisk_config_write,
        .ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
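
/*
 * Backend initialization, invoked through the driver's .init method once it
 * is registered via CTL_BACKEND_DECLARE().  Allocates the single shared
 * 1 MB backing store: an array of PAGE_SIZE pages when CTL_RAMDISK_PAGES is
 * defined, otherwise one contiguous buffer.
 */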
int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        memset(softc, 0, sizeof(*softc));
        mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
        STAILQ_INIT(&softc->lun_list);
        softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
        softc->num_pages = softc->rd_size / PAGE_SIZE;
        softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
            softc->num_pages, M_RAMDISK, M_WAITOK);
        for (i = 0; i < softc->num_pages; i++)
                softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,
                    M_WAITOK);
#else
        softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
            M_WAITOK);
#endif

        return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        mtx_lock(&softc->lock);
        STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->cbe_lun);
                ctl_invalidate_lun(&lun->cbe_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
        for (i = 0; i < softc->num_pages; i++)
                free(softc->ramdisk_pages[i], M_RAMDISK);

        free(softc->ramdisk_pages, M_RAMDISK);
#else
        free(softc->ramdisk_buffer, M_RAMDISK);
#endif

        if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
                printf("ctl_backend_ramdisk_shutdown: "
                       "ctl_backend_deregister() failed!\n");
        }
}
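
/*
 * DMA completion callback.  Accounts the DMA time, frees any scatter/gather
 * list allocated by ctl_backend_ramdisk_continue(), and either finishes the
 * I/O or, when more data remains because the transfer is larger than the
 * backing store, queues the remainder back to the LUN's task queue.
 */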
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun;
        struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
        cbe_lun = CTL_BACKEND_LUN(io);
        be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
        getbinuptime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
        io->io_hdr.num_dmas++;
        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
        if (io->io_hdr.flags & CTL_FLAG_ABORT) {
                ;
        } else if (io->io_hdr.port_status != 0 &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
                    /*retry_count*/ io->io_hdr.port_status);
        } else if (io->scsiio.kern_data_resid != 0 &&
            (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                ctl_set_invalid_field_ciu(&io->scsiio);
        } else if ((io->io_hdr.port_status == 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
                        mtx_lock(&be_lun->queue_lock);
                        STAILQ_INSERT_TAIL(&be_lun->cont_queue,
                            &io->io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        taskqueue_enqueue(be_lun->io_taskqueue,
                            &be_lun->io_task);
                        return (0);
                }
                ctl_set_success(&io->scsiio);
        }
        ctl_data_submit_done(io);
        return (0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun;
        struct ctl_lba_len_flags *lbalen;

        cbe_lun = CTL_BACKEND_LUN(io);
        lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        if (lbalen->flags & CTL_LLF_VERIFY) {
                ctl_set_success(&io->scsiio);
                ctl_data_submit_done(io);
                return (CTL_RETVAL_COMPLETE);
        }
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
            lbalen->len * cbe_lun->blocksize;
        ctl_backend_ramdisk_continue(io);
        return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
        struct ctl_sg_entry *sg_entries;
        int i;
#endif

        softc = &rd_softc;
        len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
        sg_filled = min(btoc(len), softc->num_pages);
        if (sg_filled > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                    sg_filled, M_RAMDISK, M_WAITOK);
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                for (i = 0, len_filled = 0; i < sg_filled; i++) {
                        sg_entries[i].addr = softc->ramdisk_pages[i];
                        sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
                        len_filled += sg_entries[i].len;
                }
        } else {
                sg_filled = 0;
                len_filled = len;
                io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
        }
#else
        sg_filled = 0;
        len_filled = min(len, softc->rd_size);
        io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_len = len_filled;
        io->scsiio.kern_sg_entries = sg_filled;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
        getbinuptime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}
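
/*
 * Per-LUN task queue handler.  Drains the continuation queue, resubmitting
 * each deferred I/O through ctl_backend_ramdisk_continue() with the queue
 * lock dropped around the call.
 */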
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
        struct ctl_be_ramdisk_lun *be_lun;
        union ctl_io *io;

        be_lun = (struct ctl_be_ramdisk_lun *)context;

        mtx_lock(&be_lun->queue_lock);
        for (;;) {
                io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
                if (io != NULL) {
                        STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
                            ctl_io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        ctl_backend_ramdisk_continue(io);
                        mtx_lock(&be_lun->queue_lock);
                        continue;
                }

                /*
                 * If we get here, there is no work left in the queues, so
                 * just break out and let the task queue go to sleep.
                 */
                break;
        }
        mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                          int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;
        struct ctl_lun_req *lun_req;
        int retval;

        retval = 0;
        switch (cmd) {
        case CTL_LUN_REQ:
                lun_req = (struct ctl_lun_req *)addr;
                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                case CTL_LUNREQ_MODIFY:
                        retval = ctl_backend_ramdisk_modify(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str, sizeof(lun_req->error_str),
                                 "%s: invalid LUN request type %d", __func__,
                                 lun_req->reqtype);
                        break;
                }
                break;
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}
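
/*
 * Handle a CTL_LUNREQ_RM request from userland (e.g. ctladm): look the LUN
 * up by ID, disable and invalidate it, then sleep (interruptibly) until the
 * shutdown callback reports it unconfigured before tearing down the task
 * queue and freeing it.  If the sleep is interrupted, the shutdown callback
 * frees the LUN instead.
 */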
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        params = &req->reqdata.rm;
        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->cbe_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);
        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_disable_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we're waiting, or we're
         * going to wait for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_invalidate_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);
        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0) {
                taskqueue_drain_all(be_lun->io_taskqueue);
                taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&be_lun->cbe_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        req->status = CTL_LUN_OK;
        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        return (0);
}
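
/*
 * Handle a CTL_LUNREQ_CREATE request: allocate and fill in the backend LUN,
 * apply the creation parameters and options (ha_role, unmap, readonly,
 * serseq, ...), create the per-LUN task queue, and register the LUN with
 * CTL.  Registration completes asynchronously, so the tail of this function
 * waits for the config_status callback.
 */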
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                           struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_be_lun *cbe_lun;
        struct ctl_lun_create_params *params;
        char *value;
        char tmpstr[32];
        int retval;

        retval = 0;
        params = &req->reqdata.create;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
        cbe_lun = &be_lun->cbe_lun;
        cbe_lun->be_lun = be_lun;
        be_lun->params = req->reqdata.create;
        be_lun->softc = softc;
        sprintf(be_lun->lunname, "cram%d", softc->num_luns);
        ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                cbe_lun->lun_type = params->device_type;
        else
                cbe_lun->lun_type = T_DIRECT;
        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        cbe_lun->flags = 0;
        value = ctl_get_opt(&cbe_lun->options, "ha_role");
        if (value != NULL) {
                if (strcmp(value, "primary") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
        } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
                cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

        if (cbe_lun->lun_type == T_DIRECT ||
            cbe_lun->lun_type == T_CDROM) {
                if (params->blocksize_bytes != 0)
                        cbe_lun->blocksize = params->blocksize_bytes;
                else if (cbe_lun->lun_type == T_CDROM)
                        cbe_lun->blocksize = 2048;
                else
                        cbe_lun->blocksize = 512;
                if (params->lun_size_bytes < cbe_lun->blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                                 "%s: LUN size %ju < blocksize %u", __func__,
                                 params->lun_size_bytes, cbe_lun->blocksize);
                        goto bailout_error;
                }
                be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
                be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
                cbe_lun->maxlba = be_lun->size_blocks - 1;
                cbe_lun->atomicblock = UINT32_MAX;
                cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
        }

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = cbe_lun->blocksize;
        params->lun_size_bytes = be_lun->size_bytes;

        value = ctl_get_opt(&cbe_lun->options, "unmap");
        if (value != NULL && strcmp(value, "on") == 0)
                cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
        value = ctl_get_opt(&cbe_lun->options, "readonly");
        if (value != NULL) {
                if (strcmp(value, "on") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
        } else if (cbe_lun->lun_type != T_DIRECT)
                cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
        cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
        value = ctl_get_opt(&cbe_lun->options, "serseq");
        if (value != NULL && strcmp(value, "on") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
        else if (value != NULL && strcmp(value, "read") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
        else if (value != NULL && strcmp(value, "off") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                cbe_lun->req_lun_id = params->req_lun_id;
                cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                cbe_lun->req_lun_id = 0;

        cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
        cbe_lun->be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                         softc->num_luns);
                strncpy((char *)cbe_lun->serial_num, tmpstr,
                        MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                        MIN(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)cbe_lun->serial_num, params->serial_num,
                        MIN(sizeof(cbe_lun->serial_num),
                            sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
                strncpy((char *)cbe_lun->device_id, tmpstr,
                        MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                        MIN(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)cbe_lun->device_id, params->device_id,
                        MIN(sizeof(cbe_lun->device_id),
                            sizeof(params->device_id)));
        }

        STAILQ_INIT(&be_lun->cont_queue);
        mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
        TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
            be_lun);

        be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
        if (be_lun->io_taskqueue == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: Unable to create taskqueue", __func__);
                goto bailout_error;
        }

        retval = taskqueue_start_threads(&be_lun->io_taskqueue,
                                         /*num threads*/1,
                                         /*priority*/PWAIT,
                                         /*thread name*/
                                         "%s taskq", be_lun->lunname);
        if (retval != 0)
                goto bailout_error;

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
        mtx_unlock(&softc->lock);
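
        /*
         * Register the LUN with CTL.  ctl_add_lun() completes asynchronously;
         * success or failure is reported later through the
         * lun_config_status() callback, which is why we set the WAITING
         * flag and msleep() on the LUN below.
         */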
        retval = ctl_add_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: ctl_add_lun() returned error %d, see dmesg for "
                         "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN configuration error, see dmesg for details",
                         __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = cbe_lun->lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;
        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        if (be_lun != NULL) {
                if (be_lun->io_taskqueue != NULL) {
                        taskqueue_free(be_lun->io_taskqueue);
                }
                ctl_free_opts(&cbe_lun->options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }
        return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                           struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_be_lun *cbe_lun;
        struct ctl_lun_modify_params *params;
        char *value;
        uint32_t blocksize;
        int wasprim;

        params = &req->reqdata.modify;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->cbe_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);
        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }
        cbe_lun = &be_lun->cbe_lun;

        if (params->lun_size_bytes != 0)
                be_lun->params.lun_size_bytes = params->lun_size_bytes;
        ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

        wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
        value = ctl_get_opt(&cbe_lun->options, "ha_role");
        if (value != NULL) {
                if (strcmp(value, "primary") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
                else
                        cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
        } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
                cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
        else
                cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
        if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
                if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
                        ctl_lun_primary(cbe_lun);
                else
                        ctl_lun_secondary(cbe_lun);
        }

        blocksize = be_lun->cbe_lun.blocksize;
        if (be_lun->params.lun_size_bytes < blocksize) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN size %ju < blocksize %u", __func__,
                         be_lun->params.lun_size_bytes, blocksize);
                goto bailout_error;
        }
        be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
        be_lun->size_bytes = be_lun->size_blocks * blocksize;
        be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
        ctl_lun_capacity_changed(&be_lun->cbe_lun);

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        req->status = CTL_LUN_OK;
        return (0);

bailout_error:
        req->status = CTL_LUN_ERROR;
        return (0);
}
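
/*
 * Called back by CTL when it has finished tearing down an invalidated LUN.
 * Mark the LUN unconfigured and either wake a thread waiting on it (the rm
 * path) or, if nobody is waiting, unlink and free the LUN here.
 */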
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);
        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                do_free = 1;
        }
        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                      ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN, attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->cbe_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                       __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}
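
/*
 * Configuration (non-data) writes.  Since there is no real medium or cache
 * behind this backend, SYNCHRONIZE CACHE, PREVENT ALLOW, WRITE SAME and
 * UNMAP are simply acknowledged; START STOP UNIT only toggles the LUN's
 * started/media state.
 */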
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun;
        int retval;

        cbe_lun = CTL_BACKEND_LUN(io);
        retval = 0;
        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /*
                 * The upper level CTL code will filter out any CDBs with
                 * the immediate bit set and return the proper error.  It
                 * will also not allow a sync cache command to go to a LUN
                 * that is powered down.
                 *
                 * We don't really need to worry about what LBA range the
                 * user asked to be synced out.  When they issue a sync
                 * cache command, we'll sync out the whole thing.
                 *
                 * This is obviously just a stubbed out implementation.
                 * The real implementation will be in the RAIDCore/CTL
                 * interface, and can only really happen when RAIDCore
                 * implements a per-array cache sync.
                 */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
                if ((cdb->how & SSS_PC_MASK) != 0) {
                        ctl_set_success(&io->scsiio);
                        ctl_config_write_done(io);
                        break;
                }
                if (cdb->how & SSS_START) {
                        if (cdb->how & SSS_LOEJ)
                                ctl_lun_has_media(cbe_lun);
                        ctl_start_lun(cbe_lun);
                } else {
                        ctl_stop_lun(cbe_lun);
                        if (cdb->how & SSS_LOEJ)
                                ctl_lun_ejected(cbe_lun);
                }
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        }
        case PREVENT_ALLOW:
        case WRITE_SAME_10:
        case WRITE_SAME_16:
        case UNMAP:
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        int retval = 0;

        switch (io->scsiio.cdb[0]) {
        case SERVICE_ACTION_IN:
                if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
                        /* We have nothing to tell, leave default data. */
                        ctl_config_read_done(io);
                        retval = CTL_RETVAL_COMPLETE;
                        break;
                }
                ctl_set_invalid_field(&io->scsiio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 1,
                                      /*field*/ 1,
                                      /*bit_valid*/ 1,
                                      /*bit*/ 4);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}