/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING      = 0x04
} ctl_be_ramdisk_lun_flags;
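
/*
 * Flag lifecycle (editor's summary of how the routines below use these
 * bits): UNCONFIGURED is set when a LUN is created and cleared by the
 * lun_config_status callback once CTL has configured it; lun_shutdown
 * sets it again to signal that the LUN has been torn down.  WAITING is
 * set by the create and remove paths while they msleep() on the LUN for
 * one of those transitions.  CONFIG_ERR is set by lun_config_status on a
 * configuration failure so that a waiting creator knows to clean up and
 * report the error.
 */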

struct ctl_be_ramdisk_lun {
        uint64_t size_bytes;
        uint64_t size_blocks;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun ctl_be_lun;
};

struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int rd_size;
#ifdef CTL_RAMDISK_PAGES
        uint8_t **ramdisk_pages;
        int num_pages;
#else
        uint8_t *ramdisk_buffer;
#endif
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
                                     caddr_t addr, int flag,
                                     struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                                      struct ctl_lun_req *req, int do_wait);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                                  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        name: "ramdisk",
        flags: CTL_BE_FLAG_HAS_CONFIG,
        init: ctl_backend_ramdisk_init,
        data_submit: ctl_backend_ramdisk_submit,
        data_move_done: ctl_backend_ramdisk_move_done,
        config_read: ctl_backend_ramdisk_config_read,
        config_write: ctl_backend_ramdisk_config_write,
        ioctl: ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
        int i, j;
#endif

        softc = &rd_softc;

        memset(softc, 0, sizeof(*softc));

        mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF);

        STAILQ_INIT(&softc->lun_list);
        softc->rd_size = 4 * 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
        softc->num_pages = softc->rd_size / PAGE_SIZE;
        softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
                                                  softc->num_pages, M_RAMDISK,
                                                  M_WAITOK);
        for (i = 0; i < softc->num_pages; i++) {
                softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,
                                                 M_WAITOK);
                if (softc->ramdisk_pages[i] == NULL) {
                        for (j = 0; j < i; j++) {
                                free(softc->ramdisk_pages[j], M_RAMDISK);
                        }
                        free(softc->ramdisk_pages, M_RAMDISK);
                        panic("RAMDisk initialization failed\n");
                        return (1); /* NOTREACHED */
                }
        }
#else
        softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
                                                  M_WAITOK);
#endif

        return (0);
}
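
/*
 * Note that every ramdisk LUN shares the single rd_size (4 MB) backing
 * store allocated above, either as one contiguous buffer or as an array
 * of pages when CTL_RAMDISK_PAGES is defined.  Writes land in that shared
 * region and reads return whatever it happens to hold, which is why this
 * backend is described as a "fake" ramdisk: it is a data source/sink for
 * testing rather than real per-LUN storage.
 */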

void
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        mtx_lock(&softc->lock);
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
                /*
                 * Grab the next LUN.  The current LUN may get removed by
                 * ctl_invalidate_lun(), which will call our LUN shutdown
                 * routine, if there is no outstanding I/O for this LUN.
                 */
                next_lun = STAILQ_NEXT(lun, links);

                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->ctl_be_lun);
                ctl_invalidate_lun(&lun->ctl_be_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
        for (i = 0; i < softc->num_pages; i++)
                free(softc->ramdisk_pages[i], M_RAMDISK);

        free(softc->ramdisk_pages, M_RAMDISK);
#else
        free(softc->ramdisk_buffer, M_RAMDISK);
#endif

        if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
                printf("ctl_backend_ramdisk_shutdown: "
                       "ctl_backend_deregister() failed!\n");
        }
}

static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
        if ((io->io_hdr.port_status == 0)
         && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
         && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
                io->io_hdr.status = CTL_SUCCESS;
        else if ((io->io_hdr.port_status != 0)
              && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
              && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                /*
                 * For hardware error sense keys, the sense key
                 * specific value is defined to be a retry count,
                 * but we use it to pass back an internal FETD
                 * error code.  XXX KDM Hopefully the FETD is only
                 * using 16 bits for an error code, since that's
                 * all the space we have in the sks field.
                 */
                ctl_set_internal_failure(&io->scsiio,
                                         /*sks_valid*/ 1,
                                         /*retry_count*/
                                         io->io_hdr.port_status);
        }
#ifdef CTL_TIME_IO
        getbintime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
        io->io_hdr.num_dmas++;
#endif

        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        ctl_done(io);
        return (0);
}
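
/*
 * Data path overview: ctl_backend_ramdisk_submit() below points the I/O's
 * kernel data pointer (or a temporary S/G list) at the shared ramdisk
 * backing store and hands the request to ctl_datamove().  Once the front
 * end has finished moving the data, CTL invokes
 * ctl_backend_ramdisk_move_done() above, which frees any S/G list, fills
 * in the final status and completes the I/O via ctl_done().
 */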

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_lba_len lbalen;
#ifdef CTL_RAMDISK_PAGES
        struct ctl_sg_entry *sg_entries;
        int len_filled;
        int i;
#endif
        int num_sg_entries, len;
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_be_ramdisk_lun *be_lun;

        softc = &rd_softc;

        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                CTL_PRIV_BACKEND_LUN].ptr;
        be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

        memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
               sizeof(lbalen));

        len = lbalen.len * ctl_be_lun->blocksize;

        /*
         * Kick out the request if it's bigger than we can handle.
         */
        if (len > softc->rd_size) {
                ctl_set_internal_failure(&io->scsiio,
                                         /*sks_valid*/ 0,
                                         /*retry_count*/ 0);
                ctl_done(io);
                return (CTL_RETVAL_COMPLETE);
        }

        /*
         * Kick out the request if it's larger than the device size that
         * the user requested.
         */
        if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) {
                ctl_set_lba_out_of_range(&io->scsiio);
                ctl_done(io);
                return (CTL_RETVAL_COMPLETE);
        }

#ifdef CTL_RAMDISK_PAGES
        num_sg_entries = len >> PAGE_SHIFT;
        if ((len & (PAGE_SIZE - 1)) != 0)
                num_sg_entries++;

        if (num_sg_entries > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                                                  num_sg_entries, M_RAMDISK,
                                                  M_WAITOK);
                if (io->scsiio.kern_data_ptr == NULL) {
                        ctl_set_internal_failure(&io->scsiio,
                                                 /*sks_valid*/ 0,
                                                 /*retry_count*/ 0);
                        ctl_done(io);
                        return (CTL_RETVAL_COMPLETE);
                }
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                for (i = 0, len_filled = 0; i < num_sg_entries;
                     i++, len_filled += PAGE_SIZE) {
                        sg_entries[i].addr = softc->ramdisk_pages[i];
                        sg_entries[i].len = ctl_min(PAGE_SIZE,
                                                    len - len_filled);
                }
        } else {
#endif /* CTL_RAMDISK_PAGES */
                /*
                 * If this is less than 1 page, don't bother allocating a
                 * scatter/gather list for it.  This saves time/overhead.
                 */
                num_sg_entries = 0;
#ifdef CTL_RAMDISK_PAGES
                io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
#else
                io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif
#ifdef CTL_RAMDISK_PAGES
        }
#endif

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_len = len;
        io->scsiio.kern_total_len = len;
        io->scsiio.kern_rel_offset = 0;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_sg_entries = num_sg_entries;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);

        return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                          int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (cmd) {
        case CTL_LUN_REQ: {
                struct ctl_lun_req *lun_req;

                lun_req = (struct ctl_lun_req *)addr;

                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req,
                                                            /*do_wait*/ 1);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str,
                                 sizeof(lun_req->error_str),
                                 "%s: invalid LUN request type %d", __func__,
                                 lun_req->reqtype);
                        break;
                }
                break;
        }
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}
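
/*
 * Userland reaches this backend through the CTL character device's
 * CTL_LUN_REQ ioctl (typically by way of the ctladm(8) utility).  A rough
 * sketch of a create request, assuming the ctl_lun_req layout from
 * <cam/ctl/ctl_ioctl.h> (fields not referenced in this file, such as the
 * backend name, are shown for illustration only):
 *
 *      struct ctl_lun_req req;
 *
 *      memset(&req, 0, sizeof(req));
 *      strlcpy(req.backend, "ramdisk", sizeof(req.backend));
 *      req.reqtype = CTL_LUNREQ_CREATE;
 *      req.reqdata.create.lun_size_bytes = 10 * 1024 * 1024;
 *      req.reqdata.create.blocksize_bytes = 512;
 *      ioctl(ctl_fd, CTL_LUN_REQ, &req);
 *
 * On success req.status is CTL_LUN_OK and req.reqdata.create.req_lun_id
 * holds the LUN number assigned by CTL.
 */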

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        retval = 0;
        params = &req->reqdata.rm;

        be_lun = NULL;

        mtx_lock(&softc->lock);

        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->ctl_be_lun);

        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_disable_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we are waiting (or are about
         * to wait) for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_invalidate_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0)
                free(be_lun, M_RAMDISK);

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:

        /*
         * Don't leave the waiting flag set.  Skip this if we never found
         * the LUN, since be_lun is NULL in that case.
         */
        if (be_lun != NULL) {
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
        }

        req->status = CTL_LUN_ERROR;

        return (0);
}
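
/*
 * Sizing note for the create path below: the requested size is rounded
 * down to a whole number of blocks.  For example, a request for 1000000
 * bytes with the default 512-byte blocksize yields size_blocks = 1953 and
 * size_bytes = 999936; the rounded value is what gets reported back to
 * the caller in params->lun_size_bytes.
 */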

static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                           struct ctl_lun_req *req, int do_wait)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_create_params *params;
        uint32_t blocksize;
        char tmpstr[32];
        int retval;

        retval = 0;
        params = &req->reqdata.create;
        if (params->blocksize_bytes != 0)
                blocksize = params->blocksize_bytes;
        else
                blocksize = 512;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
                        M_WAITOK : M_NOWAIT));

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error allocating %zd bytes", __func__,
                         sizeof(*be_lun));
                goto bailout_error;
        }

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                be_lun->ctl_be_lun.lun_type = params->device_type;
        else
                be_lun->ctl_be_lun.lun_type = T_DIRECT;

        if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

                if (params->lun_size_bytes < blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                                 "%s: LUN size %ju < blocksize %u", __func__,
                                 params->lun_size_bytes, blocksize);
                        goto bailout_error;
                }

                be_lun->size_blocks = params->lun_size_bytes / blocksize;
                be_lun->size_bytes = be_lun->size_blocks * blocksize;

                be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        } else {
                be_lun->ctl_be_lun.maxlba = 0;
                blocksize = 0;
                be_lun->size_bytes = 0;
                be_lun->size_blocks = 0;
        }

        be_lun->ctl_be_lun.blocksize = blocksize;

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = blocksize;

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        be_lun->softc = softc;

        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
        be_lun->ctl_be_lun.be_lun = be_lun;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                be_lun->ctl_be_lun.req_lun_id = 0;

        be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        be_lun->ctl_be_lun.lun_config_status =
                ctl_backend_ramdisk_lun_config_status;
        be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                         softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
                        ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                        sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                        ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.serial_num,
                        params->serial_num,
                        ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
                        sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
                        ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                        sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                        ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.device_id,
                        params->device_id,
                        ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
                        sizeof(params->device_id)));
        }

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

        mtx_unlock(&softc->lock);
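
        /*
         * ctl_add_lun() may complete asynchronously: the result is
         * delivered through the lun_config_status callback registered
         * above.  That is why, when do_wait is set, the code below sleeps
         * until the UNCONFIGURED flag is cleared (success) or CONFIG_ERR
         * is set (failure).
         */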
        retval = ctl_add_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: ctl_add_lun() returned error %d, see dmesg for "
                         "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        if (do_wait == 0)
                return (retval);

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN configuration error, see dmesg for details",
                         __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = be_lun->ctl_be_lun.lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        free(be_lun, M_RAMDISK);

        return (retval);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);

        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                do_free = 1;
        }

        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                      ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN, attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                       __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}
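
/*
 * The config_write/config_read entry points handle commands that change
 * or report LUN state rather than moving user data through the ramdisk
 * buffer.  For this backend that currently means treating SYNCHRONIZE
 * CACHE as a no-op and mapping START STOP UNIT onto
 * ctl_start_lun()/ctl_stop_lun(); config_read is still a stub.
 */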

static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /*
                 * The upper level CTL code will filter out any CDBs with
                 * the immediate bit set and return the proper error.  It
                 * will also not allow a sync cache command to go to a LUN
                 * that is powered down.
                 *
                 * We don't really need to worry about what LBA range the
                 * user asked to be synced out.  When they issue a sync
                 * cache command, we'll sync out the whole thing.
                 *
                 * This is obviously just a stubbed out implementation.
                 * The real implementation will be in the RAIDCore/CTL
                 * interface, and can only really happen when RAIDCore
                 * implements a per-array cache sync.
                 */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;
                struct ctl_be_lun *ctl_be_lun;
                struct ctl_be_ramdisk_lun *be_lun;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

                ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                        CTL_PRIV_BACKEND_LUN].ptr;
                be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

                if (cdb->how & SSS_START)
                        retval = ctl_start_lun(ctl_be_lun);
                else {
                        retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
                        if ((retval == 0)
                         && (cdb->byte2 & SSS_ONOFFLINE))
                                retval = ctl_lun_offline(ctl_be_lun);
#endif
                }

                /*
                 * In general, the above routines should not fail.  They
                 * just set state for the LUN.  So we've got something
                 * pretty wrong here if we can't start or stop the LUN.
                 */
                if (retval != 0) {
                        ctl_set_internal_failure(&io->scsiio,
                                                 /*sks_valid*/ 1,
                                                 /*retry_count*/ 0xf051);
                        retval = CTL_RETVAL_COMPLETE;
                } else {
                        ctl_set_success(&io->scsiio);
                }
                ctl_config_write_done(io);
                break;
        }
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        /*
         * XXX KDM need to implement!!
         */
        return (0);
}