/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	struct ctl_lun_create_params params;
	char lunname[32];
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun cbe_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	memset(softc, 0, sizeof(*softc));
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	mtx_lock(&softc->lock);
	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
		/*
		 * Drop our lock here. Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->cbe_lun);
		ctl_invalidate_lun(&lun->cbe_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
	    CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
#ifdef CTL_TIME_IO
	getbinuptime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
	io->io_hdr.num_dmas++;
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		;
	} else if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		ctl_set_success(&io->scsiio);
	} else if ((io->io_hdr.port_status != 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code. XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
	ctl_data_submit_done(io);
	return(0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	struct ctl_lba_len_flags *lbalen;

	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
	    CTL_PRIV_BACKEND_LUN].ptr;
	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
	    lbalen->len * cbe_lun->blocksize;
	ctl_backend_ramdisk_continue(io);
	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int i;
#endif

	softc = &rd_softc;
	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
	sg_filled = min(btoc(len), softc->num_pages);
	if (sg_filled > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  sg_filled, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < sg_filled; i++) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
			len_filled += sg_entries[i].len;
		}
	} else {
		sg_filled = 0;
		len_filled = len;
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
	}
#else
	sg_filled = 0;
	len_filled = min(len, softc->rd_size);
	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_data_len = len_filled;
	io->scsiio.kern_sg_entries = sg_filled;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
	getbinuptime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;

	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			ctl_backend_ramdisk_continue(io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc = &rd_softc;
	struct ctl_lun_req *lun_req;
	int retval;

	retval = 0;
	switch (cmd) {
	case CTL_LUN_REQ:
		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	params = &req->reqdata.rm;
	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN. Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);
	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0. If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN. We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain_all(be_lun->io_taskqueue);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->cbe_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_create_params *params;
	char *value;
	char tmpstr[32];
	int retval;

	retval = 0;
	params = &req->reqdata.create;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
	cbe_lun = &be_lun->cbe_lun;
	cbe_lun->be_lun = be_lun;
	be_lun->params = req->reqdata.create;
	be_lun->softc = softc;
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		cbe_lun->lun_type = params->device_type;
	else
		cbe_lun->lun_type = T_DIRECT;
	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	cbe_lun->flags = 0;
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

	if (cbe_lun->lun_type == T_DIRECT ||
	    cbe_lun->lun_type == T_CDROM) {
		if (params->blocksize_bytes != 0)
			cbe_lun->blocksize = params->blocksize_bytes;
		else if (cbe_lun->lun_type == T_CDROM)
			cbe_lun->blocksize = 2048;
		else
			cbe_lun->blocksize = 512;
		if (params->lun_size_bytes < cbe_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, cbe_lun->blocksize);
			goto bailout_error;
		}
		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
		cbe_lun->maxlba = be_lun->size_blocks - 1;
		cbe_lun->atomicblock = UINT32_MAX;
		cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
	}

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = cbe_lun->blocksize;
	params->lun_size_bytes = be_lun->size_bytes;

	value = ctl_get_opt(&cbe_lun->options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
	value = ctl_get_opt(&cbe_lun->options, "readonly");
	if (value != NULL) {
		if (strcmp(value, "on") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	} else if (cbe_lun->lun_type != T_DIRECT)
		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
	value = ctl_get_opt(&cbe_lun->options, "serseq");
	if (value != NULL && strcmp(value, "on") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
	else if (value != NULL && strcmp(value, "read") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
	else if (value != NULL && strcmp(value, "off") == 0)
		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		cbe_lun->req_lun_id = params->req_lun_id;
		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		cbe_lun->req_lun_id = 0;

	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
	cbe_lun->be = &ctl_be_ramdisk_driver;
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)cbe_lun->serial_num, tmpstr,
			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->serial_num, params->serial_num,
			MIN(sizeof(cbe_lun->serial_num),
			    sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)cbe_lun->device_id, tmpstr,
			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			MIN(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)cbe_lun->device_id, params->device_id,
			MIN(sizeof(cbe_lun->device_id),
			    sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
		  be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->cbe_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = cbe_lun->lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;
	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL) {
			taskqueue_free(be_lun->io_taskqueue);
		}
		ctl_free_opts(&cbe_lun->options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}
	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_be_lun *cbe_lun;
	struct ctl_lun_modify_params *params;
	char *value;
	uint32_t blocksize;
	int wasprim;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->cbe_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);
	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}
	cbe_lun = &be_lun->cbe_lun;

	if (params->lun_size_bytes != 0)
		be_lun->params.lun_size_bytes = params->lun_size_bytes;
	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
	value = ctl_get_opt(&cbe_lun->options, "ha_role");
	if (value != NULL) {
		if (strcmp(value, "primary") == 0)
			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
		else
			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
	else
		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
			ctl_lun_primary(cbe_lun);
		else
			ctl_lun_secondary(cbe_lun);
	}

	blocksize = be_lun->cbe_lun.blocksize;
	if (be_lun->params.lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size %ju < blocksize %u", __func__,
			 be_lun->params.lun_size_bytes, blocksize);
		goto bailout_error;
	}
	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;
	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->cbe_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;
	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;
	return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}
	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_lun *cbe_lun;
	int retval;

	cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
	    CTL_PRIV_BACKEND_LUN].ptr;
	retval = 0;
	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
		if ((cdb->how & SSS_PC_MASK) != 0) {
			ctl_set_success(&io->scsiio);
			ctl_config_write_done(io);
			break;
		}
		if (cdb->how & SSS_START) {
			if (cdb->how & SSS_LOEJ)
				ctl_lun_has_media(cbe_lun);
			ctl_start_lun(cbe_lun);
		} else {
			ctl_stop_lun(cbe_lun);
			if (cdb->how & SSS_LOEJ)
				ctl_lun_ejected(cbe_lun);
		}
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	}
	case PREVENT_ALLOW:
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	int retval = 0;

	switch (io->scsiio.cdb[0]) {
	case SERVICE_ACTION_IN:
		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
			/* We have nothing to tell, leave default data. */
			ctl_config_read_done(io);
			retval = CTL_RETVAL_COMPLETE;
			break;
		}
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 1,
				      /*bit*/ 4);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_read_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}