/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42 #include <sys/cdefs.h> 43 __FBSDID("$FreeBSD$"); 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/kernel.h> 48 #include <sys/condvar.h> 49 #include <sys/types.h> 50 #include <sys/lock.h> 51 #include <sys/mutex.h> 52 #include <sys/malloc.h> 53 #include <sys/time.h> 54 #include <sys/queue.h> 55 #include <sys/conf.h> 56 #include <sys/ioccom.h> 57 #include <sys/module.h> 58 59 #include <cam/scsi/scsi_all.h> 60 #include <cam/ctl/ctl_io.h> 61 #include <cam/ctl/ctl.h> 62 #include <cam/ctl/ctl_util.h> 63 #include <cam/ctl/ctl_backend.h> 64 #include <cam/ctl/ctl_frontend_internal.h> 65 #include <cam/ctl/ctl_debug.h> 66 #include <cam/ctl/ctl_ioctl.h> 67 #include <cam/ctl/ctl_error.h> 68 69 typedef enum { 70 CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, 71 CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02, 72 CTL_BE_RAMDISK_LUN_WAITING = 0x04 73 } ctl_be_ramdisk_lun_flags; 74 75 struct ctl_be_ramdisk_lun { 76 uint64_t size_bytes; 77 uint64_t size_blocks; 78 struct ctl_be_ramdisk_softc *softc; 79 ctl_be_ramdisk_lun_flags flags; 80 STAILQ_ENTRY(ctl_be_ramdisk_lun) links; 81 struct ctl_be_lun ctl_be_lun; 82 }; 83 84 struct ctl_be_ramdisk_softc { 85 struct mtx lock; 86 int rd_size; 87 #ifdef CTL_RAMDISK_PAGES 88 uint8_t **ramdisk_pages; 89 int num_pages; 90 #else 91 uint8_t *ramdisk_buffer; 92 #endif 93 int num_luns; 94 STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list; 95 }; 96 97 static struct ctl_be_ramdisk_softc rd_softc; 98 99 int ctl_backend_ramdisk_init(void); 100 void ctl_backend_ramdisk_shutdown(void); 101 static int ctl_backend_ramdisk_move_done(union ctl_io *io); 102 static int ctl_backend_ramdisk_submit(union ctl_io *io); 103 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, 104 caddr_t addr, int flag, struct thread *td); 105 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, 106 struct ctl_lun_req *req); 107 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc 
*softc, 108 struct ctl_lun_req *req, int do_wait); 109 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, 110 struct ctl_lun_req *req); 111 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun); 112 static void ctl_backend_ramdisk_lun_config_status(void *be_lun, 113 ctl_lun_config_status status); 114 static int ctl_backend_ramdisk_config_write(union ctl_io *io); 115 static int ctl_backend_ramdisk_config_read(union ctl_io *io); 116 117 static struct ctl_backend_driver ctl_be_ramdisk_driver = 118 { 119 .name = "ramdisk", 120 .flags = CTL_BE_FLAG_HAS_CONFIG, 121 .init = ctl_backend_ramdisk_init, 122 .data_submit = ctl_backend_ramdisk_submit, 123 .data_move_done = ctl_backend_ramdisk_move_done, 124 .config_read = ctl_backend_ramdisk_config_read, 125 .config_write = ctl_backend_ramdisk_config_write, 126 .ioctl = ctl_backend_ramdisk_ioctl 127 }; 128 129 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk"); 130 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver); 131 132 int 133 ctl_backend_ramdisk_init(void) 134 { 135 struct ctl_be_ramdisk_softc *softc; 136 #ifdef CTL_RAMDISK_PAGES 137 int i, j; 138 #endif 139 140 141 softc = &rd_softc; 142 143 memset(softc, 0, sizeof(*softc)); 144 145 mtx_init(&softc->lock, "ramdisk", NULL, MTX_DEF); 146 147 STAILQ_INIT(&softc->lun_list); 148 softc->rd_size = 4 * 1024 * 1024; 149 #ifdef CTL_RAMDISK_PAGES 150 softc->num_pages = softc->rd_size / PAGE_SIZE; 151 softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) * 152 softc->num_pages, M_RAMDISK, 153 M_WAITOK); 154 for (i = 0; i < softc->num_pages; i++) { 155 softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK); 156 if (softc->ramdisk_pages[i] == NULL) { 157 for (j = 0; j < i; j++) { 158 free(softc->ramdisk_pages[j], M_RAMDISK); 159 } 160 free(softc->ramdisk_pages, M_RAMDISK); 161 panic("RAMDisk initialization failed\n"); 162 return (1); /* NOTREACHED */ 163 } 164 } 165 #else 166 softc->ramdisk_buffer = (uint8_t 
*)malloc(softc->rd_size, M_RAMDISK, 167 M_WAITOK); 168 #endif 169 170 return (0); 171 } 172 173 void 174 ctl_backend_ramdisk_shutdown(void) 175 { 176 struct ctl_be_ramdisk_softc *softc; 177 struct ctl_be_ramdisk_lun *lun, *next_lun; 178 #ifdef CTL_RAMDISK_PAGES 179 int i; 180 #endif 181 182 softc = &rd_softc; 183 184 mtx_lock(&softc->lock); 185 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 186 /* 187 * Grab the next LUN. The current LUN may get removed by 188 * ctl_invalidate_lun(), which will call our LUN shutdown 189 * routine, if there is no outstanding I/O for this LUN. 190 */ 191 next_lun = STAILQ_NEXT(lun, links); 192 193 /* 194 * Drop our lock here. Since ctl_invalidate_lun() can call 195 * back into us, this could potentially lead to a recursive 196 * lock of the same mutex, which would cause a hang. 197 */ 198 mtx_unlock(&softc->lock); 199 ctl_disable_lun(&lun->ctl_be_lun); 200 ctl_invalidate_lun(&lun->ctl_be_lun); 201 mtx_lock(&softc->lock); 202 } 203 mtx_unlock(&softc->lock); 204 205 #ifdef CTL_RAMDISK_PAGES 206 for (i = 0; i < softc->num_pages; i++) 207 free(softc->ramdisk_pages[i], M_RAMDISK); 208 209 free(softc->ramdisk_pages, M_RAMDISK); 210 #else 211 free(softc->ramdisk_buffer, M_RAMDISK); 212 #endif 213 214 if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) { 215 printf("ctl_backend_ramdisk_shutdown: " 216 "ctl_backend_deregister() failed!\n"); 217 } 218 } 219 220 static int 221 ctl_backend_ramdisk_move_done(union ctl_io *io) 222 { 223 #ifdef CTL_TIME_IO 224 struct bintime cur_bt; 225 #endif 226 227 CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n")); 228 if ((io->io_hdr.port_status == 0) 229 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 230 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) 231 io->io_hdr.status = CTL_SUCCESS; 232 else if ((io->io_hdr.port_status != 0) 233 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) 234 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)){ 235 /* 236 * For 
hardware error sense keys, the sense key 237 * specific value is defined to be a retry count, 238 * but we use it to pass back an internal FETD 239 * error code. XXX KDM Hopefully the FETD is only 240 * using 16 bits for an error code, since that's 241 * all the space we have in the sks field. 242 */ 243 ctl_set_internal_failure(&io->scsiio, 244 /*sks_valid*/ 1, 245 /*retry_count*/ 246 io->io_hdr.port_status); 247 } 248 #ifdef CTL_TIME_IO 249 getbintime(&cur_bt); 250 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt); 251 bintime_add(&io->io_hdr.dma_bt, &cur_bt); 252 io->io_hdr.num_dmas++; 253 #endif 254 255 if (io->scsiio.kern_sg_entries > 0) 256 free(io->scsiio.kern_data_ptr, M_RAMDISK); 257 ctl_done(io); 258 return(0); 259 } 260 261 static int 262 ctl_backend_ramdisk_submit(union ctl_io *io) 263 { 264 struct ctl_lba_len lbalen; 265 #ifdef CTL_RAMDISK_PAGES 266 struct ctl_sg_entry *sg_entries; 267 int len_filled; 268 int i; 269 #endif 270 int num_sg_entries, len; 271 struct ctl_be_ramdisk_softc *softc; 272 struct ctl_be_lun *ctl_be_lun; 273 struct ctl_be_ramdisk_lun *be_lun; 274 275 softc = &rd_softc; 276 277 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 278 CTL_PRIV_BACKEND_LUN].ptr; 279 be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun; 280 281 memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 282 sizeof(lbalen)); 283 284 len = lbalen.len * ctl_be_lun->blocksize; 285 286 /* 287 * Kick out the request if it's bigger than we can handle. 288 */ 289 if (len > softc->rd_size) { 290 ctl_set_internal_failure(&io->scsiio, 291 /*sks_valid*/ 0, 292 /*retry_count*/ 0); 293 ctl_done(io); 294 return (CTL_RETVAL_COMPLETE); 295 } 296 297 /* 298 * Kick out the request if it's larger than the device size that 299 * the user requested. 
300 */ 301 if (((lbalen.lba * ctl_be_lun->blocksize) + len) > be_lun->size_bytes) { 302 ctl_set_lba_out_of_range(&io->scsiio); 303 ctl_done(io); 304 return (CTL_RETVAL_COMPLETE); 305 } 306 307 #ifdef CTL_RAMDISK_PAGES 308 num_sg_entries = len >> PAGE_SHIFT; 309 if ((len & (PAGE_SIZE - 1)) != 0) 310 num_sg_entries++; 311 312 if (num_sg_entries > 1) { 313 io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) * 314 num_sg_entries, M_RAMDISK, 315 M_WAITOK); 316 if (io->scsiio.kern_data_ptr == NULL) { 317 ctl_set_internal_failure(&io->scsiio, 318 /*sks_valid*/ 0, 319 /*retry_count*/ 0); 320 ctl_done(io); 321 return (CTL_RETVAL_COMPLETE); 322 } 323 sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr; 324 for (i = 0, len_filled = 0; i < num_sg_entries; 325 i++, len_filled += PAGE_SIZE) { 326 sg_entries[i].addr = softc->ramdisk_pages[i]; 327 sg_entries[i].len = ctl_min(PAGE_SIZE, 328 len - len_filled); 329 } 330 } else { 331 #endif /* CTL_RAMDISK_PAGES */ 332 /* 333 * If this is less than 1 page, don't bother allocating a 334 * scatter/gather list for it. This saves time/overhead. 
335 */ 336 num_sg_entries = 0; 337 #ifdef CTL_RAMDISK_PAGES 338 io->scsiio.kern_data_ptr = softc->ramdisk_pages[0]; 339 #else 340 io->scsiio.kern_data_ptr = softc->ramdisk_buffer; 341 #endif 342 #ifdef CTL_RAMDISK_PAGES 343 } 344 #endif 345 346 io->scsiio.be_move_done = ctl_backend_ramdisk_move_done; 347 io->scsiio.kern_data_len = len; 348 io->scsiio.kern_total_len = len; 349 io->scsiio.kern_rel_offset = 0; 350 io->scsiio.kern_data_resid = 0; 351 io->scsiio.kern_sg_entries = num_sg_entries; 352 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST; 353 #ifdef CTL_TIME_IO 354 getbintime(&io->io_hdr.dma_start_bt); 355 #endif 356 ctl_datamove(io); 357 358 return (CTL_RETVAL_COMPLETE); 359 } 360 361 static int 362 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, 363 int flag, struct thread *td) 364 { 365 struct ctl_be_ramdisk_softc *softc; 366 int retval; 367 368 retval = 0; 369 softc = &rd_softc; 370 371 switch (cmd) { 372 case CTL_LUN_REQ: { 373 struct ctl_lun_req *lun_req; 374 375 lun_req = (struct ctl_lun_req *)addr; 376 377 switch (lun_req->reqtype) { 378 case CTL_LUNREQ_CREATE: 379 retval = ctl_backend_ramdisk_create(softc, lun_req, 380 /*do_wait*/ 1); 381 break; 382 case CTL_LUNREQ_RM: 383 retval = ctl_backend_ramdisk_rm(softc, lun_req); 384 break; 385 case CTL_LUNREQ_MODIFY: 386 retval = ctl_backend_ramdisk_modify(softc, lun_req); 387 break; 388 default: 389 lun_req->status = CTL_LUN_ERROR; 390 snprintf(lun_req->error_str, sizeof(lun_req->error_str), 391 "%s: invalid LUN request type %d", __func__, 392 lun_req->reqtype); 393 break; 394 } 395 break; 396 } 397 default: 398 retval = ENOTTY; 399 break; 400 } 401 402 return (retval); 403 } 404 405 static int 406 ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc, 407 struct ctl_lun_req *req) 408 { 409 struct ctl_be_ramdisk_lun *be_lun; 410 struct ctl_lun_rm_params *params; 411 int retval; 412 413 414 retval = 0; 415 params = &req->reqdata.rm; 416 417 be_lun = NULL; 418 419 
mtx_lock(&softc->lock); 420 421 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 422 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 423 break; 424 } 425 mtx_unlock(&softc->lock); 426 427 if (be_lun == NULL) { 428 snprintf(req->error_str, sizeof(req->error_str), 429 "%s: LUN %u is not managed by the ramdisk backend", 430 __func__, params->lun_id); 431 goto bailout_error; 432 } 433 434 retval = ctl_disable_lun(&be_lun->ctl_be_lun); 435 436 if (retval != 0) { 437 snprintf(req->error_str, sizeof(req->error_str), 438 "%s: error %d returned from ctl_disable_lun() for " 439 "LUN %d", __func__, retval, params->lun_id); 440 goto bailout_error; 441 } 442 443 /* 444 * Set the waiting flag before we invalidate the LUN. Our shutdown 445 * routine can be called any time after we invalidate the LUN, 446 * and can be called from our context. 447 * 448 * This tells the shutdown routine that we're waiting, or we're 449 * going to wait for the shutdown to happen. 450 */ 451 mtx_lock(&softc->lock); 452 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; 453 mtx_unlock(&softc->lock); 454 455 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun); 456 if (retval != 0) { 457 snprintf(req->error_str, sizeof(req->error_str), 458 "%s: error %d returned from ctl_invalidate_lun() for " 459 "LUN %d", __func__, retval, params->lun_id); 460 goto bailout_error; 461 } 462 463 mtx_lock(&softc->lock); 464 465 while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) { 466 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); 467 if (retval == EINTR) 468 break; 469 } 470 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; 471 472 /* 473 * We only remove this LUN from the list and free it (below) if 474 * retval == 0. If the user interrupted the wait, we just bail out 475 * without actually freeing the LUN. We let the shutdown routine 476 * free the LUN if that happens. 
477 */ 478 if (retval == 0) { 479 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, 480 links); 481 softc->num_luns--; 482 } 483 484 mtx_unlock(&softc->lock); 485 486 if (retval == 0) 487 free(be_lun, M_RAMDISK); 488 489 req->status = CTL_LUN_OK; 490 491 return (retval); 492 493 bailout_error: 494 495 /* 496 * Don't leave the waiting flag set. 497 */ 498 mtx_lock(&softc->lock); 499 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; 500 mtx_unlock(&softc->lock); 501 502 req->status = CTL_LUN_ERROR; 503 504 return (0); 505 } 506 507 static int 508 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc, 509 struct ctl_lun_req *req, int do_wait) 510 { 511 struct ctl_be_ramdisk_lun *be_lun; 512 struct ctl_lun_create_params *params; 513 uint32_t blocksize; 514 char tmpstr[32]; 515 int retval; 516 517 retval = 0; 518 params = &req->reqdata.create; 519 if (params->blocksize_bytes != 0) 520 blocksize = params->blocksize_bytes; 521 else 522 blocksize = 512; 523 524 be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ? 
525 M_WAITOK : M_NOWAIT)); 526 527 if (be_lun == NULL) { 528 snprintf(req->error_str, sizeof(req->error_str), 529 "%s: error allocating %zd bytes", __func__, 530 sizeof(*be_lun)); 531 goto bailout_error; 532 } 533 534 if (params->flags & CTL_LUN_FLAG_DEV_TYPE) 535 be_lun->ctl_be_lun.lun_type = params->device_type; 536 else 537 be_lun->ctl_be_lun.lun_type = T_DIRECT; 538 539 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) { 540 541 if (params->lun_size_bytes < blocksize) { 542 snprintf(req->error_str, sizeof(req->error_str), 543 "%s: LUN size %ju < blocksize %u", __func__, 544 params->lun_size_bytes, blocksize); 545 goto bailout_error; 546 } 547 548 be_lun->size_blocks = params->lun_size_bytes / blocksize; 549 be_lun->size_bytes = be_lun->size_blocks * blocksize; 550 551 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 552 } else { 553 be_lun->ctl_be_lun.maxlba = 0; 554 blocksize = 0; 555 be_lun->size_bytes = 0; 556 be_lun->size_blocks = 0; 557 } 558 559 be_lun->ctl_be_lun.blocksize = blocksize; 560 561 /* Tell the user the blocksize we ended up using */ 562 params->blocksize_bytes = blocksize; 563 564 /* Tell the user the exact size we ended up using */ 565 params->lun_size_bytes = be_lun->size_bytes; 566 567 be_lun->softc = softc; 568 569 be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED; 570 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY; 571 be_lun->ctl_be_lun.be_lun = be_lun; 572 573 if (params->flags & CTL_LUN_FLAG_ID_REQ) { 574 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id; 575 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ; 576 } else 577 be_lun->ctl_be_lun.req_lun_id = 0; 578 579 be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown; 580 be_lun->ctl_be_lun.lun_config_status = 581 ctl_backend_ramdisk_lun_config_status; 582 be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver; 583 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) { 584 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d", 585 softc->num_luns); 586 strncpy((char 
*)be_lun->ctl_be_lun.serial_num, tmpstr, 587 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 588 sizeof(tmpstr))); 589 590 /* Tell the user what we used for a serial number */ 591 strncpy((char *)params->serial_num, tmpstr, 592 ctl_min(sizeof(params->serial_num), sizeof(tmpstr))); 593 } else { 594 strncpy((char *)be_lun->ctl_be_lun.serial_num, 595 params->serial_num, 596 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num), 597 sizeof(params->serial_num))); 598 } 599 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) { 600 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns); 601 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr, 602 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 603 sizeof(tmpstr))); 604 605 /* Tell the user what we used for a device ID */ 606 strncpy((char *)params->device_id, tmpstr, 607 ctl_min(sizeof(params->device_id), sizeof(tmpstr))); 608 } else { 609 strncpy((char *)be_lun->ctl_be_lun.device_id, 610 params->device_id, 611 ctl_min(sizeof(be_lun->ctl_be_lun.device_id), 612 sizeof(params->device_id))); 613 } 614 615 mtx_lock(&softc->lock); 616 softc->num_luns++; 617 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links); 618 619 mtx_unlock(&softc->lock); 620 621 retval = ctl_add_lun(&be_lun->ctl_be_lun); 622 if (retval != 0) { 623 mtx_lock(&softc->lock); 624 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, 625 links); 626 softc->num_luns--; 627 mtx_unlock(&softc->lock); 628 snprintf(req->error_str, sizeof(req->error_str), 629 "%s: ctl_add_lun() returned error %d, see dmesg for " 630 "details", __func__, retval); 631 retval = 0; 632 goto bailout_error; 633 } 634 635 if (do_wait == 0) 636 return (retval); 637 638 mtx_lock(&softc->lock); 639 640 /* 641 * Tell the config_status routine that we're waiting so it won't 642 * clean up the LUN in the event of an error. 
643 */ 644 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING; 645 646 while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) { 647 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0); 648 if (retval == EINTR) 649 break; 650 } 651 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING; 652 653 if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) { 654 snprintf(req->error_str, sizeof(req->error_str), 655 "%s: LUN configuration error, see dmesg for details", 656 __func__); 657 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, 658 links); 659 softc->num_luns--; 660 mtx_unlock(&softc->lock); 661 goto bailout_error; 662 } else { 663 params->req_lun_id = be_lun->ctl_be_lun.lun_id; 664 } 665 mtx_unlock(&softc->lock); 666 667 req->status = CTL_LUN_OK; 668 669 return (retval); 670 671 bailout_error: 672 req->status = CTL_LUN_ERROR; 673 free(be_lun, M_RAMDISK); 674 675 return (retval); 676 } 677 678 static int 679 ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc, 680 struct ctl_lun_req *req) 681 { 682 struct ctl_be_ramdisk_lun *be_lun; 683 struct ctl_lun_modify_params *params; 684 uint32_t blocksize; 685 686 params = &req->reqdata.modify; 687 688 be_lun = NULL; 689 690 mtx_lock(&softc->lock); 691 STAILQ_FOREACH(be_lun, &softc->lun_list, links) { 692 if (be_lun->ctl_be_lun.lun_id == params->lun_id) 693 break; 694 } 695 mtx_unlock(&softc->lock); 696 697 if (be_lun == NULL) { 698 snprintf(req->error_str, sizeof(req->error_str), 699 "%s: LUN %u is not managed by the ramdisk backend", 700 __func__, params->lun_id); 701 goto bailout_error; 702 } 703 704 if (params->lun_size_bytes == 0) { 705 snprintf(req->error_str, sizeof(req->error_str), 706 "%s: LUN size \"auto\" not supported " 707 "by the ramdisk backend", __func__); 708 goto bailout_error; 709 } 710 711 blocksize = be_lun->ctl_be_lun.blocksize; 712 713 if (params->lun_size_bytes < blocksize) { 714 snprintf(req->error_str, sizeof(req->error_str), 715 "%s: LUN size %ju < blocksize %u", __func__, 716 
params->lun_size_bytes, blocksize); 717 goto bailout_error; 718 } 719 720 be_lun->size_blocks = params->lun_size_bytes / blocksize; 721 be_lun->size_bytes = be_lun->size_blocks * blocksize; 722 723 /* 724 * The maximum LBA is the size - 1. 725 * 726 * XXX: Note that this field is being updated without locking, 727 * which might cause problems on 32-bit architectures. 728 */ 729 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1; 730 ctl_lun_capacity_changed(&be_lun->ctl_be_lun); 731 732 /* Tell the user the exact size we ended up using */ 733 params->lun_size_bytes = be_lun->size_bytes; 734 735 req->status = CTL_LUN_OK; 736 737 return (0); 738 739 bailout_error: 740 req->status = CTL_LUN_ERROR; 741 742 return (0); 743 } 744 745 static void 746 ctl_backend_ramdisk_lun_shutdown(void *be_lun) 747 { 748 struct ctl_be_ramdisk_lun *lun; 749 struct ctl_be_ramdisk_softc *softc; 750 int do_free; 751 752 lun = (struct ctl_be_ramdisk_lun *)be_lun; 753 softc = lun->softc; 754 do_free = 0; 755 756 mtx_lock(&softc->lock); 757 758 lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED; 759 760 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) { 761 wakeup(lun); 762 } else { 763 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun, 764 links); 765 softc->num_luns--; 766 do_free = 1; 767 } 768 769 mtx_unlock(&softc->lock); 770 771 if (do_free != 0) 772 free(be_lun, M_RAMDISK); 773 } 774 775 static void 776 ctl_backend_ramdisk_lun_config_status(void *be_lun, 777 ctl_lun_config_status status) 778 { 779 struct ctl_be_ramdisk_lun *lun; 780 struct ctl_be_ramdisk_softc *softc; 781 782 lun = (struct ctl_be_ramdisk_lun *)be_lun; 783 softc = lun->softc; 784 785 if (status == CTL_LUN_CONFIG_OK) { 786 mtx_lock(&softc->lock); 787 lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED; 788 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) 789 wakeup(lun); 790 mtx_unlock(&softc->lock); 791 792 /* 793 * We successfully added the LUN, attempt to enable it. 
794 */ 795 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) { 796 printf("%s: ctl_enable_lun() failed!\n", __func__); 797 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) { 798 printf("%s: ctl_invalidate_lun() failed!\n", 799 __func__); 800 } 801 } 802 803 return; 804 } 805 806 807 mtx_lock(&softc->lock); 808 lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED; 809 810 /* 811 * If we have a user waiting, let him handle the cleanup. If not, 812 * clean things up here. 813 */ 814 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) { 815 lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR; 816 wakeup(lun); 817 } else { 818 STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun, 819 links); 820 softc->num_luns--; 821 free(lun, M_RAMDISK); 822 } 823 mtx_unlock(&softc->lock); 824 } 825 826 static int 827 ctl_backend_ramdisk_config_write(union ctl_io *io) 828 { 829 struct ctl_be_ramdisk_softc *softc; 830 int retval; 831 832 retval = 0; 833 softc = &rd_softc; 834 835 switch (io->scsiio.cdb[0]) { 836 case SYNCHRONIZE_CACHE: 837 case SYNCHRONIZE_CACHE_16: 838 /* 839 * The upper level CTL code will filter out any CDBs with 840 * the immediate bit set and return the proper error. It 841 * will also not allow a sync cache command to go to a LUN 842 * that is powered down. 843 * 844 * We don't really need to worry about what LBA range the 845 * user asked to be synced out. When they issue a sync 846 * cache command, we'll sync out the whole thing. 847 * 848 * This is obviously just a stubbed out implementation. 849 * The real implementation will be in the RAIDCore/CTL 850 * interface, and can only really happen when RAIDCore 851 * implements a per-array cache sync. 
852 */ 853 ctl_set_success(&io->scsiio); 854 ctl_config_write_done(io); 855 break; 856 case START_STOP_UNIT: { 857 struct scsi_start_stop_unit *cdb; 858 struct ctl_be_lun *ctl_be_lun; 859 struct ctl_be_ramdisk_lun *be_lun; 860 861 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb; 862 863 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[ 864 CTL_PRIV_BACKEND_LUN].ptr; 865 be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun; 866 867 if (cdb->how & SSS_START) 868 retval = ctl_start_lun(ctl_be_lun); 869 else { 870 retval = ctl_stop_lun(ctl_be_lun); 871 #ifdef NEEDTOPORT 872 if ((retval == 0) 873 && (cdb->byte2 & SSS_ONOFFLINE)) 874 retval = ctl_lun_offline(ctl_be_lun); 875 #endif 876 } 877 878 /* 879 * In general, the above routines should not fail. They 880 * just set state for the LUN. So we've got something 881 * pretty wrong here if we can't start or stop the LUN. 882 */ 883 if (retval != 0) { 884 ctl_set_internal_failure(&io->scsiio, 885 /*sks_valid*/ 1, 886 /*retry_count*/ 0xf051); 887 retval = CTL_RETVAL_COMPLETE; 888 } else { 889 ctl_set_success(&io->scsiio); 890 } 891 ctl_config_write_done(io); 892 break; 893 } 894 default: 895 ctl_set_invalid_opcode(&io->scsiio); 896 ctl_config_write_done(io); 897 retval = CTL_RETVAL_COMPLETE; 898 break; 899 } 900 901 return (retval); 902 } 903 904 static int 905 ctl_backend_ramdisk_config_read(union ctl_io *io) 906 { 907 /* 908 * XXX KDM need to implement!! 909 */ 910 return (0); 911 } 912