/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
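 *
 * All LUNs created by this backend share the same small chunk of backing
 * memory (rd_size bytes, carved into pages when CTL_RAMDISK_PAGES is
 * defined), so data written to one LBA is not kept separately from data
 * written to any other; reads simply return whatever currently sits in
 * the shared buffer.  The backend exists to exercise and benchmark the
 * CTL data-movement path, not to store data.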
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING      = 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
        char lunname[32];
        uint64_t size_bytes;
        uint64_t size_blocks;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun ctl_be_lun;
        struct taskqueue *io_taskqueue;
        struct task io_task;
        STAILQ_HEAD(, ctl_io_hdr) cont_queue;
        struct mtx_padalign queue_lock;
};

struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int rd_size;
#ifdef CTL_RAMDISK_PAGES
        uint8_t **ramdisk_pages;
        int num_pages;
#else
        uint8_t *ramdisk_buffer;
#endif
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
    caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        .name = "ramdisk",
        .flags = CTL_BE_FLAG_HAS_CONFIG,
        .init = ctl_backend_ramdisk_init,
        .data_submit = ctl_backend_ramdisk_submit,
        .data_move_done = ctl_backend_ramdisk_move_done,
        .config_read = ctl_backend_ramdisk_config_read,
        .config_write = ctl_backend_ramdisk_config_write,
        .ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

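/*
 * Backend init method (see ctl_be_ramdisk_driver above): allocate the
 * shared backing store, rd_size bytes, either as an array of pages or as
 * one contiguous buffer, and initialize the LUN list and softc lock.
 */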
int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        memset(softc, 0, sizeof(*softc));

        mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);

        STAILQ_INIT(&softc->lun_list);
        softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
        softc->num_pages = softc->rd_size / PAGE_SIZE;
        softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
            softc->num_pages, M_RAMDISK, M_WAITOK);
        for (i = 0; i < softc->num_pages; i++)
                softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,
                    M_WAITOK);
#else
        softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
            M_WAITOK);
#endif

        return (0);
}

void
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        mtx_lock(&softc->lock);
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
                /*
                 * Grab the next LUN.  The current LUN may get removed by
                 * ctl_invalidate_lun(), which will call our LUN shutdown
                 * routine, if there is no outstanding I/O for this LUN.
                 */
                next_lun = STAILQ_NEXT(lun, links);

                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->ctl_be_lun);
                ctl_invalidate_lun(&lun->ctl_be_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
        for (i = 0; i < softc->num_pages; i++)
                free(softc->ramdisk_pages[i], M_RAMDISK);

        free(softc->ramdisk_pages, M_RAMDISK);
#else
        free(softc->ramdisk_buffer, M_RAMDISK);
#endif

        if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
                printf("ctl_backend_ramdisk_shutdown: "
                    "ctl_backend_deregister() failed!\n");
        }
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
            CTL_PRIV_BACKEND_LUN].ptr;
        be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
#ifdef CTL_TIME_IO
        getbintime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
        io->io_hdr.num_dmas++;
#endif
        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
        if (io->io_hdr.flags & CTL_FLAG_ABORT) {
                ;
        } else if ((io->io_hdr.port_status == 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
                        mtx_lock(&be_lun->queue_lock);
                        STAILQ_INSERT_TAIL(&be_lun->cont_queue,
                            &io->io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        taskqueue_enqueue(be_lun->io_taskqueue,
                            &be_lun->io_task);
                        return (0);
                }
                ctl_set_success(&io->scsiio);
        } else if ((io->io_hdr.port_status != 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                /*
                 * For hardware error sense keys, the sense key specific
                 * value is defined to be a retry count, but we use it to
                 * pass back an internal FETD error code.  XXX KDM Hopefully
                 * the FETD is only using 16 bits for an error code, since
                 * that's all the space we have in the sks field.
                 */
                ctl_set_internal_failure(&io->scsiio,
                    /*sks_valid*/ 1,
                    /*retry_count*/ io->io_hdr.port_status);
        }
        ctl_data_submit_done(io);
        return (0);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_lba_len_flags *lbalen;

        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
            CTL_PRIV_BACKEND_LUN].ptr;
        lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        if (lbalen->flags & CTL_LLF_VERIFY) {
                ctl_set_success(&io->scsiio);
                ctl_data_submit_done(io);
                return (CTL_RETVAL_COMPLETE);
        }
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
            lbalen->len * ctl_be_lun->blocksize;
        ctl_backend_ramdisk_continue(io);
        return (CTL_RETVAL_COMPLETE);
}

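/*
 * Push out the next chunk of an I/O.  ctl_backend_ramdisk_submit() stores
 * the total transfer length in ctl_private[CTL_PRIV_BACKEND].integer; each
 * call here maps at most one backing buffer's worth (rd_size bytes, or
 * num_pages pages) and hands it to ctl_datamove().  When the data move
 * completes, ctl_backend_ramdisk_move_done() requeues the I/O on the LUN's
 * taskqueue if there is a residual count, so large transfers are processed
 * as a series of chunks over the same shared memory.
 */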
static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
        struct ctl_sg_entry *sg_entries;
        int i;
#endif

        softc = &rd_softc;
        len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
        sg_filled = min(btoc(len), softc->num_pages);
        if (sg_filled > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                    sg_filled, M_RAMDISK, M_WAITOK);
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                for (i = 0, len_filled = 0; i < sg_filled; i++) {
                        sg_entries[i].addr = softc->ramdisk_pages[i];
                        sg_entries[i].len = ctl_min(PAGE_SIZE,
                            len - len_filled);
                        len_filled += sg_entries[i].len;
                }
                io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
        } else {
                sg_filled = 0;
                len_filled = len;
                io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
        }
#else
        sg_filled = 0;
        len_filled = min(len, softc->rd_size);
        io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_data_len = len_filled;
        io->scsiio.kern_sg_entries = sg_filled;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}

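/*
 * Taskqueue handler for the per-LUN continuation queue.  I/Os with data
 * still left to move are queued here by ctl_backend_ramdisk_move_done();
 * the queue lock is dropped around ctl_backend_ramdisk_continue() so the
 * data-move path never runs with queue_lock held.
 */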
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *be_lun;
        union ctl_io *io;

        be_lun = (struct ctl_be_ramdisk_lun *)context;
        softc = be_lun->softc;

        mtx_lock(&be_lun->queue_lock);
        for (;;) {
                io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
                if (io != NULL) {
                        STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
                            ctl_io_hdr, links);

                        mtx_unlock(&be_lun->queue_lock);

                        ctl_backend_ramdisk_continue(io);

                        mtx_lock(&be_lun->queue_lock);
                        continue;
                }

                /*
                 * If we get here, there is no work left in the queues, so
                 * just break out and let the task queue go to sleep.
                 */
                break;
        }
        mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
    int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (cmd) {
        case CTL_LUN_REQ: {
                struct ctl_lun_req *lun_req;

                lun_req = (struct ctl_lun_req *)addr;

                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req,
                            /*do_wait*/ 1);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                case CTL_LUNREQ_MODIFY:
                        retval = ctl_backend_ramdisk_modify(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str,
                            sizeof(lun_req->error_str),
                            "%s: invalid LUN request type %d", __func__,
                            lun_req->reqtype);
                        break;
                }
                break;
        }
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        retval = 0;
        params = &req->reqdata.rm;

        be_lun = NULL;

        mtx_lock(&softc->lock);

        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN %u is not managed by the ramdisk backend",
                    __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->ctl_be_lun);

        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error %d returned from ctl_disable_lun() for "
                    "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we're waiting, or we're
         * going to wait for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error %d returned from ctl_invalidate_lun() for "
                    "LUN %d", __func__, retval, params->lun_id);
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0) {
                taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
                taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

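/*
 * Create a new LUN.  The be_lun structure is filled in from the request
 * parameters and handed to ctl_add_lun(); if do_wait is set (the ioctl
 * path), we then sleep until ctl_backend_ramdisk_lun_config_status()
 * reports success or failure for the new LUN.  No per-LUN data storage is
 * allocated; only the reported capacity differs between LUNs.
 */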
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req, int do_wait)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_create_params *params;
        uint32_t blocksize;
        char *value;
        char tmpstr[32];
        int retval, unmap;

        retval = 0;
        params = &req->reqdata.create;
        if (params->blocksize_bytes != 0)
                blocksize = params->blocksize_bytes;
        else
                blocksize = 512;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
            M_WAITOK : M_NOWAIT));

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: error allocating %zd bytes", __func__,
                    sizeof(*be_lun));
                goto bailout_error;
        }
        sprintf(be_lun->lunname, "cram%d", softc->num_luns);
        ctl_init_opts(&be_lun->ctl_be_lun.options,
            req->num_be_args, req->kern_be_args);

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                be_lun->ctl_be_lun.lun_type = params->device_type;
        else
                be_lun->ctl_be_lun.lun_type = T_DIRECT;

        if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

                if (params->lun_size_bytes < blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                            "%s: LUN size %ju < blocksize %u", __func__,
                            params->lun_size_bytes, blocksize);
                        goto bailout_error;
                }

                be_lun->size_blocks = params->lun_size_bytes / blocksize;
                be_lun->size_bytes = be_lun->size_blocks * blocksize;

                be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        } else {
                be_lun->ctl_be_lun.maxlba = 0;
                blocksize = 0;
                be_lun->size_bytes = 0;
                be_lun->size_blocks = 0;
        }

        be_lun->ctl_be_lun.blocksize = blocksize;

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = blocksize;

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        be_lun->softc = softc;

        unmap = 0;
        value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
        if (value != NULL && strcmp(value, "on") == 0)
                unmap = 1;

        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
        if (unmap)
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
        be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
        be_lun->ctl_be_lun.be_lun = be_lun;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                be_lun->ctl_be_lun.req_lun_id = 0;

        be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        be_lun->ctl_be_lun.lun_config_status =
            ctl_backend_ramdisk_lun_config_status;
        be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                    softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
                    ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                    sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                    ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.serial_num,
                    params->serial_num,
                    ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                    sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d",
                    softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
                    ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                    sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                    ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.device_id,
                    params->device_id,
                    ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                    sizeof(params->device_id)));
        }

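        /*
         * Set up the per-LUN continuation queue and a single-threaded
         * taskqueue.  ctl_backend_ramdisk_move_done() queues I/Os here
         * when a transfer still has data left to move.
         */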
        STAILQ_INIT(&be_lun->cont_queue);
        mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
        TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
            be_lun);

        be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
        if (be_lun->io_taskqueue == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: Unable to create taskqueue", __func__);
                goto bailout_error;
        }

        retval = taskqueue_start_threads(&be_lun->io_taskqueue,
            /*num threads*/ 1,
            /*priority*/ PWAIT,
            /*thread name*/ "%s taskq", be_lun->lunname);
        if (retval != 0)
                goto bailout_error;

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

        mtx_unlock(&softc->lock);

        retval = ctl_add_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: ctl_add_lun() returned error %d, see dmesg for "
                    "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        if (do_wait == 0)
                return (retval);

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN configuration error, see dmesg for details",
                    __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = be_lun->ctl_be_lun.lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        if (be_lun != NULL) {
                if (be_lun->io_taskqueue != NULL) {
                        taskqueue_free(be_lun->io_taskqueue);
                }
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        return (retval);
}

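/*
 * Resize an existing LUN.  Since this backend has no real per-LUN storage,
 * only the reported capacity changes: size_blocks/size_bytes and maxlba are
 * recomputed from the requested size and CTL is told the capacity changed.
 */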
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
    struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_modify_params *params;
        uint32_t blocksize;

        params = &req->reqdata.modify;

        be_lun = NULL;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN %u is not managed by the ramdisk backend",
                    __func__, params->lun_id);
                goto bailout_error;
        }

        if (params->lun_size_bytes == 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN size \"auto\" not supported "
                    "by the ramdisk backend", __func__);
                goto bailout_error;
        }

        blocksize = be_lun->ctl_be_lun.blocksize;

        if (params->lun_size_bytes < blocksize) {
                snprintf(req->error_str, sizeof(req->error_str),
                    "%s: LUN size %ju < blocksize %u", __func__,
                    params->lun_size_bytes, blocksize);
                goto bailout_error;
        }

        be_lun->size_blocks = params->lun_size_bytes / blocksize;
        be_lun->size_bytes = be_lun->size_blocks * blocksize;

        /*
         * The maximum LBA is the size - 1.
         *
         * XXX: Note that this field is being updated without locking,
         * which might cause problems on 32-bit architectures.
         */
        be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        req->status = CTL_LUN_OK;

        return (0);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

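/*
 * Callback from CTL once the LUN is finally gone.  If an rm/create request
 * is sleeping on this LUN (CTL_BE_RAMDISK_LUN_WAITING), just wake it up and
 * let it do the cleanup; otherwise remove and free the LUN here.
 */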
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);

        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                do_free = 1;
        }

        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
    ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN, attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                    __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                    links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}

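/*
 * Handle the configuration-type commands CTL hands to the backend.  For a
 * ramdisk with no real media, SYNCHRONIZE CACHE, WRITE SAME and UNMAP are
 * simply acknowledged, and START STOP UNIT only flips the LUN's started
 * state via ctl_start_lun()/ctl_stop_lun().
 */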
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /*
                 * The upper level CTL code will filter out any CDBs with
                 * the immediate bit set and return the proper error.  It
                 * will also not allow a sync cache command to go to a LUN
                 * that is powered down.
                 *
                 * We don't really need to worry about what LBA range the
                 * user asked to be synced out.  When they issue a sync
                 * cache command, we'll sync out the whole thing.
                 *
                 * This is obviously just a stubbed out implementation.
                 * The real implementation will be in the RAIDCore/CTL
                 * interface, and can only really happen when RAIDCore
                 * implements a per-array cache sync.
                 */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;
                struct ctl_be_lun *ctl_be_lun;
                struct ctl_be_ramdisk_lun *be_lun;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

                ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                    CTL_PRIV_BACKEND_LUN].ptr;
                be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

                if (cdb->how & SSS_START)
                        retval = ctl_start_lun(ctl_be_lun);
                else {
                        retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
                        if ((retval == 0)
                         && (cdb->byte2 & SSS_ONOFFLINE))
                                retval = ctl_lun_offline(ctl_be_lun);
#endif
                }

                /*
                 * In general, the above routines should not fail.  They
                 * just set state for the LUN.  So we've got something
                 * pretty wrong here if we can't start or stop the LUN.
                 */
                if (retval != 0) {
                        ctl_set_internal_failure(&io->scsiio,
                            /*sks_valid*/ 1,
                            /*retry_count*/ 0xf051);
                        retval = CTL_RETVAL_COMPLETE;
                } else {
                        ctl_set_success(&io->scsiio);
                }
                ctl_config_write_done(io);
                break;
        }
        case WRITE_SAME_10:
        case WRITE_SAME_16:
        case UNMAP:
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        /*
         * XXX KDM need to implement!!
         */
        return (0);
}