/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
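 *
 * This backend does not provide persistent per-LUN storage: all reads
 * and writes are serviced from a single shared buffer (or a small pool
 * of shared pages when CTL_RAMDISK_PAGES is defined), so data written
 * to a LUN is not preserved.  It is intended for testing and
 * benchmarking the CTL stack rather than for use as real storage.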
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
	char lunname[32];
	uint64_t size_bytes;
	uint64_t size_blocks;
	struct ctl_be_ramdisk_softc *softc;
	ctl_be_ramdisk_lun_flags flags;
	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
	struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
	struct mtx lock;
	int rd_size;
#ifdef CTL_RAMDISK_PAGES
	uint8_t **ramdisk_pages;
	int num_pages;
#else
	uint8_t *ramdisk_buffer;
#endif
	int num_luns;
	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
				     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
				  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
				      struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
						  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
	.name = "ramdisk",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_backend_ramdisk_init,
	.data_submit = ctl_backend_ramdisk_submit,
	.data_move_done = ctl_backend_ramdisk_move_done,
	.config_read = ctl_backend_ramdisk_config_read,
	.config_write = ctl_backend_ramdisk_config_write,
	.ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
	struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	memset(softc, 0, sizeof(*softc));

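	/*
	 * Set up the softc lock, the LUN list, and the shared backing
	 * store that services I/O for every ramdisk LUN.
	 */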
	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);

	STAILQ_INIT(&softc->lun_list);
	softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
	softc->num_pages = softc->rd_size / PAGE_SIZE;
	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
						  softc->num_pages, M_RAMDISK,
						  M_WAITOK);
	for (i = 0; i < softc->num_pages; i++)
		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK);
#else
	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
						  M_WAITOK);
#endif

	return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
	int i;
#endif

	softc = &rd_softc;

	mtx_lock(&softc->lock);
	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
		/*
		 * Grab the next LUN.  The current LUN may get removed by
		 * ctl_invalidate_lun(), which will call our LUN shutdown
		 * routine, if there is no outstanding I/O for this LUN.
		 */
		next_lun = STAILQ_NEXT(lun, links);

		/*
		 * Drop our lock here.  Since ctl_invalidate_lun() can call
		 * back into us, this could potentially lead to a recursive
		 * lock of the same mutex, which would cause a hang.
		 */
		mtx_unlock(&softc->lock);
		ctl_disable_lun(&lun->ctl_be_lun);
		ctl_invalidate_lun(&lun->ctl_be_lun);
		mtx_lock(&softc->lock);
	}
	mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
	for (i = 0; i < softc->num_pages; i++)
		free(softc->ramdisk_pages[i], M_RAMDISK);

	free(softc->ramdisk_pages, M_RAMDISK);
#else
	free(softc->ramdisk_buffer, M_RAMDISK);
#endif

	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
		printf("ctl_backend_ramdisk_shutdown: "
		       "ctl_backend_deregister() failed!\n");
	}
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
	struct ctl_be_lun *ctl_be_lun;
	struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif
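	/*
	 * Free any S/G list we allocated, then either queue the I/O for
	 * another pass through ctl_backend_ramdisk_continue() if there is
	 * more data left to transfer, or complete it back to CTL.
	 */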
	if (io->scsiio.kern_sg_entries > 0)
		free(io->scsiio.kern_data_ptr, M_RAMDISK);
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
	if ((io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
			mtx_lock(&be_lun->queue_lock);
			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
			    &io->io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			taskqueue_enqueue(be_lun->io_taskqueue,
			    &be_lun->io_task);
			return (0);
		}
		io->io_hdr.status = CTL_SUCCESS;
	} else if ((io->io_hdr.port_status != 0)
		&& ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
		&& ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}
	ctl_data_submit_done(io);
	return (0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
	struct ctl_be_lun *ctl_be_lun;
	struct ctl_lba_len_flags *lbalen;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if (lbalen->flags & CTL_LLF_VERIFY) {
		ctl_set_success(&io->scsiio);
		ctl_data_submit_done(io);
		return (CTL_RETVAL_COMPLETE);
	}
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
	    lbalen->len * ctl_be_lun->blocksize;
	ctl_backend_ramdisk_continue(io);
	return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
	struct ctl_sg_entry *sg_entries;
	int i;
#endif

	softc = &rd_softc;
	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
	sg_filled = min(btoc(len), softc->num_pages);
	if (sg_filled > 1) {
		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
						  sg_filled, M_RAMDISK,
						  M_WAITOK);
		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
		for (i = 0, len_filled = 0; i < sg_filled; i++) {
			sg_entries[i].addr = softc->ramdisk_pages[i];
			sg_entries[i].len = ctl_min(PAGE_SIZE,
						    len - len_filled);
			len_filled += sg_entries[i].len;
		}
		io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
	} else {
		sg_filled = 0;
		len_filled = len;
		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
	}
#else
	sg_filled = 0;
	len_filled = min(len, softc->rd_size);
	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_data_len = len_filled;
	io->scsiio.kern_sg_entries = sg_filled;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
	getbintime(&io->io_hdr.dma_start_bt);
#endif
	ctl_datamove(io);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
	struct ctl_be_ramdisk_softc *softc;
	struct ctl_be_ramdisk_lun *be_lun;
	union ctl_io *io;

	be_lun = (struct ctl_be_ramdisk_lun *)context;
	softc = be_lun->softc;

	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
		if (io != NULL) {
			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->queue_lock);

			ctl_backend_ramdisk_continue(io);

			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}
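
/*
 * Handle LUN requests coming from userland (ctladm) via the CTL
 * character device: create, remove, and modify ramdisk-backed LUNs.
 */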
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			  int flag, struct thread *td)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			retval = ctl_backend_ramdisk_create(softc, lun_req,
							    /*do_wait*/ 1);
			break;
		case CTL_LUNREQ_RM:
			retval = ctl_backend_ramdisk_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			retval = ctl_backend_ramdisk_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		retval = ENOTTY;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
		       struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_rm_params *params;
	int retval;

	retval = 0;
	params = &req->reqdata.rm;

	be_lun = NULL;

	mtx_lock(&softc->lock);

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	/*
	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
	 * routine can be called any time after we invalidate the LUN,
	 * and can be called from our context.
	 *
	 * This tells the shutdown routine that we're waiting, or we're
	 * going to wait for the shutdown to happen.
	 */
	mtx_lock(&softc->lock);
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
	mtx_unlock(&softc->lock);

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		mtx_lock(&softc->lock);
		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	/*
	 * We only remove this LUN from the list and free it (below) if
	 * retval == 0.  If the user interrupted the wait, we just bail out
	 * without actually freeing the LUN.  We let the shutdown routine
	 * free the LUN if that happens.
	 */
	if (retval == 0) {
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
	}

	mtx_unlock(&softc->lock);

	if (retval == 0) {
		taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
		taskqueue_free(be_lun->io_taskqueue);
		ctl_free_opts(&be_lun->ctl_be_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req, int do_wait)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_create_params *params;
	uint32_t blocksize;
	char *value;
	char tmpstr[32];
	int retval, unmap;

	retval = 0;
	params = &req->reqdata.create;
	if (params->blocksize_bytes != 0)
		blocksize = params->blocksize_bytes;
	else
		blocksize = 512;

	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
			M_WAITOK : M_NOWAIT));

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating %zd bytes", __func__,
			 sizeof(*be_lun));
		goto bailout_error;
	}
	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
	ctl_init_opts(&be_lun->ctl_be_lun.options,
	    req->num_be_args, req->kern_be_args);

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

		if (params->lun_size_bytes < blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 params->lun_size_bytes, blocksize);
			goto bailout_error;
		}

		be_lun->size_blocks = params->lun_size_bytes / blocksize;
		be_lun->size_bytes = be_lun->size_blocks * blocksize;

		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		be_lun->ctl_be_lun.maxlba = 0;
		blocksize = 0;
		be_lun->size_bytes = 0;
		be_lun->size_blocks = 0;
	}

	be_lun->ctl_be_lun.blocksize = blocksize;

	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = blocksize;

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	be_lun->softc = softc;

	unmap = 0;
	value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		unmap = 1;

	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	if (unmap)
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
	be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
	be_lun->ctl_be_lun.be_lun = be_lun;

	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_backend_ramdisk_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
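	/*
	 * If the user did not supply a serial number or device ID,
	 * generate defaults based on the current LUN count.
	 */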
	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	STAILQ_INIT(&be_lun->cont_queue);
	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
		  be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/1,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);
	if (retval != 0)
		goto bailout_error;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	if (do_wait == 0)
		return (retval);
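
	/*
	 * Synchronous create: wait for the configuration callback to
	 * report success or failure before returning to the caller.
	 */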
	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}
	mtx_unlock(&softc->lock);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;
	if (be_lun != NULL) {
		if (be_lun->io_taskqueue != NULL) {
			taskqueue_free(be_lun->io_taskqueue);
		}
		ctl_free_opts(&be_lun->ctl_be_lun.options);
		mtx_destroy(&be_lun->queue_lock);
		free(be_lun, M_RAMDISK);
	}

	return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
			   struct ctl_lun_req *req)
{
	struct ctl_be_ramdisk_lun *be_lun;
	struct ctl_lun_modify_params *params;
	uint32_t blocksize;

	params = &req->reqdata.modify;

	be_lun = NULL;

	mtx_lock(&softc->lock);
	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the ramdisk backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size \"auto\" not supported "
			 "by the ramdisk backend", __func__);
		goto bailout_error;
	}

	blocksize = be_lun->ctl_be_lun.blocksize;

	if (params->lun_size_bytes < blocksize) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN size %ju < blocksize %u", __func__,
			 params->lun_size_bytes, blocksize);
		goto bailout_error;
	}

	be_lun->size_blocks = params->lun_size_bytes / blocksize;
	be_lun->size_bytes = be_lun->size_blocks * blocksize;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 * which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;
	int do_free;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;
	do_free = 0;

	mtx_lock(&softc->lock);

	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		do_free = 1;
	}

	mtx_unlock(&softc->lock);

	if (do_free != 0)
		free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
				      ctl_lun_config_status status)
{
	struct ctl_be_ramdisk_lun *lun;
	struct ctl_be_ramdisk_softc *softc;

	lun = (struct ctl_be_ramdisk_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

	/*
	 * If we have a user waiting, let him handle the cleanup.  If not,
	 * clean things up here.
	 */
	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
		wakeup(lun);
	} else {
		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
			      links);
		softc->num_luns--;
		free(lun, M_RAMDISK);
	}
	mtx_unlock(&softc->lock);
}
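
/*
 * Handle configuration (non-data) SCSI commands.  SYNCHRONIZE CACHE,
 * WRITE SAME and UNMAP are acknowledged without doing any work, since
 * there is no real backing store; START STOP UNIT starts or stops the
 * LUN.
 */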
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
	struct ctl_be_ramdisk_softc *softc;
	int retval;

	retval = 0;
	softc = &rd_softc;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.  It
		 * will also not allow a sync cache command to go to a LUN
		 * that is powered down.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 *
		 * This is obviously just a stubbed out implementation.
		 * The real implementation will be in the RAIDCore/CTL
		 * interface, and can only really happen when RAIDCore
		 * implements a per-array cache sync.
		 */
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;
		struct ctl_be_lun *ctl_be_lun;
		struct ctl_be_ramdisk_lun *be_lun;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
			CTL_PRIV_BACKEND_LUN].ptr;
		be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		ctl_set_success(&io->scsiio);
		ctl_config_write_done(io);
		break;
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
	/*
	 * XXX KDM need to implement!!
	 */
	return (0);
}