/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
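 *
 * The "fake" part is literal: every LUN serves reads and writes out of the
 * same small shared buffer (or short page list) regardless of LBA, so this
 * backend is intended for testing and benchmarking the CTL stack rather
 * than for storing data.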
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING      = 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
        char lunname[32];
        uint64_t size_bytes;
        uint64_t size_blocks;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun ctl_be_lun;
        struct taskqueue *io_taskqueue;
        struct task io_task;
        STAILQ_HEAD(, ctl_io_hdr) cont_queue;
        struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int rd_size;
#ifdef CTL_RAMDISK_PAGES
        uint8_t **ramdisk_pages;
        int num_pages;
#else
        uint8_t *ramdisk_buffer;
#endif
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
    caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        .name = "ramdisk",
        .flags = CTL_BE_FLAG_HAS_CONFIG,
        .init = ctl_backend_ramdisk_init,
        .data_submit = ctl_backend_ramdisk_submit,
        .data_move_done = ctl_backend_ramdisk_move_done,
        .config_read = ctl_backend_ramdisk_config_read,
        .config_write = ctl_backend_ramdisk_config_write,
        .ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        memset(softc, 0, sizeof(*softc));

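        /*
         * One-time module setup: initialize the softc lock and LUN list,
         * then allocate the shared backing store (an array of pages or a
         * single contiguous buffer, depending on CTL_RAMDISK_PAGES) that
         * all ramdisk LUNs will serve their I/O from.
         */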
        mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);

        STAILQ_INIT(&softc->lun_list);
        softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
        softc->num_pages = softc->rd_size / PAGE_SIZE;
        softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
            softc->num_pages, M_RAMDISK, M_WAITOK);
        for (i = 0; i < softc->num_pages; i++)
                softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,
                    M_WAITOK);
#else
        softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
            M_WAITOK);
#endif

        return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        mtx_lock(&softc->lock);
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
                /*
                 * Grab the next LUN.  The current LUN may get removed by
                 * ctl_invalidate_lun(), which will call our LUN shutdown
                 * routine, if there is no outstanding I/O for this LUN.
                 */
                next_lun = STAILQ_NEXT(lun, links);

                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->ctl_be_lun);
                ctl_invalidate_lun(&lun->ctl_be_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
        for (i = 0; i < softc->num_pages; i++)
                free(softc->ramdisk_pages[i], M_RAMDISK);

        free(softc->ramdisk_pages, M_RAMDISK);
#else
        free(softc->ramdisk_buffer, M_RAMDISK);
#endif

        if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
                printf("ctl_backend_ramdisk_shutdown: "
                       "ctl_backend_deregister() failed!\n");
        }
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
            CTL_PRIV_BACKEND_LUN].ptr;
        be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
#ifdef CTL_TIME_IO
        getbintime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
        io->io_hdr.num_dmas++;
#endif
        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
        if ((io->io_hdr.port_status == 0)
         && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
         && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
                        mtx_lock(&be_lun->queue_lock);
                        STAILQ_INSERT_TAIL(&be_lun->cont_queue,
                            &io->io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        taskqueue_enqueue(be_lun->io_taskqueue,
                            &be_lun->io_task);
                        return (0);
                }
                io->io_hdr.status = CTL_SUCCESS;
        } else if ((io->io_hdr.port_status != 0)
            && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
            && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                /*
                 * For hardware error sense keys, the sense key
                 * specific value is defined to be a retry count,
                 * but we use it to pass back an internal FETD
                 * error code.
                 * XXX KDM Hopefully the FETD is only using 16 bits for an
                 * error code, since that's all the space we have in the
                 * sks field.
                 */
                ctl_set_internal_failure(&io->scsiio,
                    /*sks_valid*/ 1,
                    /*retry_count*/ io->io_hdr.port_status);
        }
        ctl_data_submit_done(io);
        return (0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_lba_len_flags *lbalen;

        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
            CTL_PRIV_BACKEND_LUN].ptr;
        lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        if (lbalen->flags & CTL_LLF_VERIFY) {
                ctl_set_success(&io->scsiio);
                ctl_data_submit_done(io);
                return (CTL_RETVAL_COMPLETE);
        }
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
            lbalen->len * ctl_be_lun->blocksize;
        ctl_backend_ramdisk_continue(io);
        return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
        struct ctl_sg_entry *sg_entries;
        int i;
#endif

        softc = &rd_softc;
        len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
        sg_filled = min(btoc(len), softc->num_pages);
        if (sg_filled > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                    sg_filled, M_RAMDISK, M_WAITOK);
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                for (i = 0, len_filled = 0; i < sg_filled; i++) {
                        sg_entries[i].addr = softc->ramdisk_pages[i];
                        sg_entries[i].len = ctl_min(PAGE_SIZE,
                            len - len_filled);
                        len_filled += sg_entries[i].len;
                }
                io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
        } else {
                sg_filled = 0;
                len_filled = len;
                io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
        }
#else
        sg_filled = 0;
        len_filled = min(len, softc->rd_size);
        io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_data_len = len_filled;
        io->scsiio.kern_sg_entries = sg_filled;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *be_lun;
        union ctl_io *io;

        be_lun = (struct ctl_be_ramdisk_lun *)context;
        softc = be_lun->softc;

        mtx_lock(&be_lun->queue_lock);
        for (;;) {
                io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
                if (io != NULL) {
                        STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
                            ctl_io_hdr, links);

                        mtx_unlock(&be_lun->queue_lock);

                        ctl_backend_ramdisk_continue(io);

                        mtx_lock(&be_lun->queue_lock);
                        continue;
                }

                /*
                 * If we get here, there is no work left in the queues, so
                 * just break out and let the task queue go to sleep.
                 */
                break;
        }
        mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (cmd) {
        case CTL_LUN_REQ: {
                struct ctl_lun_req *lun_req;

                lun_req = (struct ctl_lun_req *)addr;

                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req,
                            /*do_wait*/ 1);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                case CTL_LUNREQ_MODIFY:
                        retval = ctl_backend_ramdisk_modify(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str,
                            sizeof(lun_req->error_str),
                            "%s: invalid LUN request type %d", __func__,
                            lun_req->reqtype);
                        break;
                }
                break;
        }
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        retval = 0;
        params = &req->reqdata.rm;

        be_lun = NULL;

        mtx_lock(&softc->lock);

        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN %u is not managed by the ramdisk backend",
                    __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->ctl_be_lun);

        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error %d returned from ctl_disable_lun() for "
                    "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we're waiting, or we're
         * going to wait for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error %d returned from ctl_invalidate_lun() for "
                    "LUN %d", __func__, retval, params->lun_id);
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0) {
                taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
                taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req, int do_wait)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_create_params *params;
        uint32_t blocksize;
        char *value;
        char tmpstr[32];
        int retval, unmap;

        retval = 0;
        params = &req->reqdata.create;
        if (params->blocksize_bytes != 0)
                blocksize = params->blocksize_bytes;
        else
                blocksize = 512;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
            M_WAITOK : M_NOWAIT));

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error allocating %zd bytes", __func__,
                    sizeof(*be_lun));
                goto bailout_error;
        }
        sprintf(be_lun->lunname, "cram%d", softc->num_luns);
        ctl_init_opts(&be_lun->ctl_be_lun.options,
            req->num_be_args, req->kern_be_args);

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                be_lun->ctl_be_lun.lun_type = params->device_type;
        else
                be_lun->ctl_be_lun.lun_type = T_DIRECT;

        if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

                if (params->lun_size_bytes < blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                            "%s: LUN size %ju < blocksize %u", __func__,
                            params->lun_size_bytes, blocksize);
                        goto bailout_error;
                }

                be_lun->size_blocks = params->lun_size_bytes / blocksize;
                be_lun->size_bytes = be_lun->size_blocks * blocksize;

                be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        } else {
                be_lun->ctl_be_lun.maxlba = 0;
                blocksize = 0;
                be_lun->size_bytes = 0;
                be_lun->size_blocks = 0;
        }

        be_lun->ctl_be_lun.blocksize = blocksize;

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = blocksize;

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        be_lun->softc = softc;

        unmap = 0;
        value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
        if (value != NULL && strcmp(value, "on") == 0)
                unmap = 1;

        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
        if (unmap)
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
        be_lun->ctl_be_lun.be_lun = be_lun;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                be_lun->ctl_be_lun.req_lun_id = 0;

        be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        be_lun->ctl_be_lun.lun_config_status =
            ctl_backend_ramdisk_lun_config_status;
        be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                    softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
                    ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                    sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                    ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.serial_num,
                    params->serial_num,
                    ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                    sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
                    ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                    sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                    ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.device_id,
                    params->device_id,
                    ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                    sizeof(params->device_id)));
        }

        STAILQ_INIT(&be_lun->cont_queue);
        mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
        TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
            be_lun);

        be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
        if (be_lun->io_taskqueue == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: Unable to create taskqueue", __func__);
                goto bailout_error;
        }

        retval = taskqueue_start_threads(&be_lun->io_taskqueue,
            /*num threads*/1,
            /*priority*/PWAIT,
            /*thread name*/"%s taskq", be_lun->lunname);
        if (retval != 0)
                goto bailout_error;

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

        mtx_unlock(&softc->lock);

        retval = ctl_add_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: ctl_add_lun() returned error %d, see dmesg for "
                    "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        if (do_wait == 0)
                return (retval);

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN configuration error, see dmesg for details",
                    __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = be_lun->ctl_be_lun.lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        if (be_lun != NULL) {
                if (be_lun->io_taskqueue != NULL) {
                        taskqueue_free(be_lun->io_taskqueue);
                }
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_modify_params *params;
        uint32_t blocksize;

        params = &req->reqdata.modify;

        be_lun = NULL;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN %u is not managed by the ramdisk backend",
                    __func__, params->lun_id);
                goto bailout_error;
        }

        if (params->lun_size_bytes == 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN size \"auto\" not supported "
                    "by the ramdisk backend", __func__);
                goto bailout_error;
        }

        blocksize = be_lun->ctl_be_lun.blocksize;

        if (params->lun_size_bytes < blocksize) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN size %ju < blocksize %u", __func__,
                    params->lun_size_bytes, blocksize);
                goto bailout_error;
        }

        be_lun->size_blocks = params->lun_size_bytes / blocksize;
        be_lun->size_bytes = be_lun->size_blocks * blocksize;

        /*
         * The maximum LBA is the size - 1.
         *
         * XXX: Note that this field is being updated without locking,
         *      which might cause problems on 32-bit architectures.
         */
        be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        req->status = CTL_LUN_OK;

        return (0);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);

        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                do_free = 1;
        }

        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN, attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                    __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /*
                 * The upper level CTL code will filter out any CDBs with
                 * the immediate bit set and return the proper error.  It
                 * will also not allow a sync cache command to go to a LUN
                 * that is powered down.
                 *
                 * We don't really need to worry about what LBA range the
                 * user asked to be synced out.  When they issue a sync
                 * cache command, we'll sync out the whole thing.
                 *
                 * This is obviously just a stubbed out implementation.
                 * The real implementation will be in the RAIDCore/CTL
                 * interface, and can only really happen when RAIDCore
                 * implements a per-array cache sync.
                 */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;
                struct ctl_be_lun *ctl_be_lun;
                struct ctl_be_ramdisk_lun *be_lun;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

                ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                    CTL_PRIV_BACKEND_LUN].ptr;
                be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

                if (cdb->how & SSS_START)
                        retval = ctl_start_lun(ctl_be_lun);
                else {
                        retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
                        if ((retval == 0)
                         && (cdb->byte2 & SSS_ONOFFLINE))
                                retval = ctl_lun_offline(ctl_be_lun);
#endif
                }

                /*
                 * In general, the above routines should not fail.  They
                 * just set state for the LUN.  So we've got something
                 * pretty wrong here if we can't start or stop the LUN.
                 */
                if (retval != 0) {
                        ctl_set_internal_failure(&io->scsiio,
                            /*sks_valid*/ 1,
                            /*retry_count*/ 0xf051);
                        retval = CTL_RETVAL_COMPLETE;
                } else {
                        ctl_set_success(&io->scsiio);
                }
                ctl_config_write_done(io);
                break;
        }
        case WRITE_SAME_10:
        case WRITE_SAME_16:
        case UNMAP:
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        /*
         * XXX KDM need to implement!!
         */
        return (0);
}