xref: /freebsd/sys/cam/ctl/ctl_backend_ramdisk.c (revision ba8d15d3a8b10be9f3a0cb86a60246f225a36736)
1 /*-
2  * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3  * Copyright (c) 2012 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Edward Tomasz Napierala
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions, and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    substantially similar to the "NO WARRANTY" disclaimer below
17  *    ("Disclaimer") and any redistribution must be conditioned upon
18  *    including a substantially similar Disclaimer requirement for further
19  *    binary redistribution.
20  *
21  * NO WARRANTY
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGES.
33  *
34  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
35  */
36 /*
37  * CAM Target Layer backend for a "fake" ramdisk.
38  *
39  * Author: Ken Merry <ken@FreeBSD.org>
40  */
41 
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/condvar.h>
49 #include <sys/types.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/malloc.h>
53 #include <sys/taskqueue.h>
54 #include <sys/time.h>
55 #include <sys/queue.h>
56 #include <sys/conf.h>
57 #include <sys/ioccom.h>
58 #include <sys/module.h>
59 
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/ctl/ctl_io.h>
62 #include <cam/ctl/ctl.h>
63 #include <cam/ctl/ctl_util.h>
64 #include <cam/ctl/ctl_backend.h>
65 #include <cam/ctl/ctl_frontend_internal.h>
66 #include <cam/ctl/ctl_debug.h>
67 #include <cam/ctl/ctl_ioctl.h>
68 #include <cam/ctl/ctl_error.h>
69 
70 typedef enum {
71 	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
72 	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
73 	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
74 } ctl_be_ramdisk_lun_flags;
75 
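/*
 * Per-LUN state: the reported size, a back pointer to the softc, the CTL
 * LUN structure itself, and a continuation queue plus taskqueue used to
 * run multi-pass I/O outside of the data-move completion path.
 */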
76 struct ctl_be_ramdisk_lun {
77 	char lunname[32];
78 	uint64_t size_bytes;
79 	uint64_t size_blocks;
80 	struct ctl_be_ramdisk_softc *softc;
81 	ctl_be_ramdisk_lun_flags flags;
82 	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
83 	struct ctl_be_lun ctl_be_lun;
84 	struct taskqueue *io_taskqueue;
85 	struct task io_task;
86 	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
87 	struct mtx_padalign queue_lock;
88 };
89 
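/*
 * Global backend state.  All LUNs share one small backing store (rd_size
 * bytes, or num_pages individual pages when CTL_RAMDISK_PAGES is defined),
 * so data written to the ramdisk is not actually retained; see the "fake"
 * ramdisk note at the top of the file.
 */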
90 struct ctl_be_ramdisk_softc {
91 	struct mtx lock;
92 	int rd_size;
93 #ifdef CTL_RAMDISK_PAGES
94 	uint8_t **ramdisk_pages;
95 	int num_pages;
96 #else
97 	uint8_t *ramdisk_buffer;
98 #endif
99 	int num_luns;
100 	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
101 };
102 
103 static struct ctl_be_ramdisk_softc rd_softc;
104 
105 int ctl_backend_ramdisk_init(void);
106 void ctl_backend_ramdisk_shutdown(void);
107 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
108 static int ctl_backend_ramdisk_submit(union ctl_io *io);
109 static void ctl_backend_ramdisk_continue(union ctl_io *io);
110 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
111 				     caddr_t addr, int flag, struct thread *td);
112 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
113 				  struct ctl_lun_req *req);
114 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
115 				      struct ctl_lun_req *req, int do_wait);
116 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
117 				  struct ctl_lun_req *req);
118 static void ctl_backend_ramdisk_worker(void *context, int pending);
119 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
120 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
121 						  ctl_lun_config_status status);
122 static int ctl_backend_ramdisk_config_write(union ctl_io *io);
123 static int ctl_backend_ramdisk_config_read(union ctl_io *io);
124 
125 static struct ctl_backend_driver ctl_be_ramdisk_driver =
126 {
127 	.name = "ramdisk",
128 	.flags = CTL_BE_FLAG_HAS_CONFIG,
129 	.init = ctl_backend_ramdisk_init,
130 	.data_submit = ctl_backend_ramdisk_submit,
131 	.data_move_done = ctl_backend_ramdisk_move_done,
132 	.config_read = ctl_backend_ramdisk_config_read,
133 	.config_write = ctl_backend_ramdisk_config_write,
134 	.ioctl = ctl_backend_ramdisk_ioctl
135 };
136 
137 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
138 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
139 
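/*
 * Backend initialization: set up the shared softc, its mutex and LUN list,
 * and allocate the 1 MB backing store, either as an array of PAGE_SIZE
 * pages (CTL_RAMDISK_PAGES) or as a single contiguous buffer.
 */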
140 int
141 ctl_backend_ramdisk_init(void)
142 {
143 	struct ctl_be_ramdisk_softc *softc;
144 #ifdef CTL_RAMDISK_PAGES
145 	int i;
146 #endif
147 
148 
149 	softc = &rd_softc;
150 
151 	memset(softc, 0, sizeof(*softc));
152 
153 	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
154 
155 	STAILQ_INIT(&softc->lun_list);
156 	softc->rd_size = 1024 * 1024;
157 #ifdef CTL_RAMDISK_PAGES
158 	softc->num_pages = softc->rd_size / PAGE_SIZE;
159 	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
160 						  softc->num_pages, M_RAMDISK,
161 						  M_WAITOK);
162 	for (i = 0; i < softc->num_pages; i++)
163 		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
164 #else
165 	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
166 						  M_WAITOK);
167 #endif
168 
169 	return (0);
170 }
171 
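/*
 * Backend shutdown: disable and invalidate every LUN (dropping the softc
 * lock around those calls, since they can call back into this backend),
 * free the backing store and deregister from CTL.
 */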
172 void
173 ctl_backend_ramdisk_shutdown(void)
174 {
175 	struct ctl_be_ramdisk_softc *softc;
176 	struct ctl_be_ramdisk_lun *lun, *next_lun;
177 #ifdef CTL_RAMDISK_PAGES
178 	int i;
179 #endif
180 
181 	softc = &rd_softc;
182 
183 	mtx_lock(&softc->lock);
184 	for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
185 		/*
186 		 * Grab the next LUN.  The current LUN may get removed by
187 		 * ctl_invalidate_lun(), which will call our LUN shutdown
188 		 * routine, if there is no outstanding I/O for this LUN.
189 		 */
190 		next_lun = STAILQ_NEXT(lun, links);
191 
192 		/*
193 		 * Drop our lock here.  Since ctl_invalidate_lun() can call
194 		 * back into us, this could potentially lead to a recursive
195 		 * lock of the same mutex, which would cause a hang.
196 		 */
197 		mtx_unlock(&softc->lock);
198 		ctl_disable_lun(&lun->ctl_be_lun);
199 		ctl_invalidate_lun(&lun->ctl_be_lun);
200 		mtx_lock(&softc->lock);
201 	}
202 	mtx_unlock(&softc->lock);
203 
204 #ifdef CTL_RAMDISK_PAGES
205 	for (i = 0; i < softc->num_pages; i++)
206 		free(softc->ramdisk_pages[i], M_RAMDISK);
207 
208 	free(softc->ramdisk_pages, M_RAMDISK);
209 #else
210 	free(softc->ramdisk_buffer, M_RAMDISK);
211 #endif
212 
213 	if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
214 		printf("ctl_backend_ramdisk_shutdown: "
215 		       "ctl_backend_deregister() failed!\n");
216 	}
217 }
218 
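/*
 * Data-move completion callback (be_move_done).  Account the DMA time if
 * CTL_TIME_IO is enabled, free any temporary S/G list, and then either
 * queue the I/O for another pass through ctl_backend_ramdisk_continue()
 * (if bytes remain in CTL_PRIV_BACKEND) or complete it.
 */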
219 static int
220 ctl_backend_ramdisk_move_done(union ctl_io *io)
221 {
222 	struct ctl_be_lun *ctl_be_lun;
223 	struct ctl_be_ramdisk_lun *be_lun;
224 #ifdef CTL_TIME_IO
225 	struct bintime cur_bt;
226 #endif
227 
228 	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
229 	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
230 		CTL_PRIV_BACKEND_LUN].ptr;
231 	be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
232 #ifdef CTL_TIME_IO
233 	getbintime(&cur_bt);
234 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
235 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
236 	io->io_hdr.num_dmas++;
237 #endif
238 	if (io->scsiio.kern_sg_entries > 0)
239 		free(io->scsiio.kern_data_ptr, M_RAMDISK);
240 	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
241 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
242 		;
243 	} else if ((io->io_hdr.port_status == 0) &&
244 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
245 		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
246 			mtx_lock(&be_lun->queue_lock);
247 			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
248 			    &io->io_hdr, links);
249 			mtx_unlock(&be_lun->queue_lock);
250 			taskqueue_enqueue(be_lun->io_taskqueue,
251 			    &be_lun->io_task);
252 			return (0);
253 		}
254 		ctl_set_success(&io->scsiio);
255 	} else if ((io->io_hdr.port_status != 0) &&
256 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
257 	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
258 		/*
259 		 * For hardware error sense keys, the sense key
260 		 * specific value is defined to be a retry count,
261 		 * but we use it to pass back an internal FETD
262 		 * error code.  XXX KDM  Hopefully the FETD is only
263 		 * using 16 bits for an error code, since that's
264 		 * all the space we have in the sks field.
265 		 */
266 		ctl_set_internal_failure(&io->scsiio,
267 					 /*sks_valid*/ 1,
268 					 /*retry_count*/
269 					 io->io_hdr.port_status);
270 	}
271 	ctl_data_submit_done(io);
272 	return (0);
273 }
274 
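/*
 * data_submit entry point.  VERIFY completes immediately, since there is
 * no real medium to check; otherwise the total transfer length in bytes
 * is stashed in CTL_PRIV_BACKEND and the first chunk is kicked off.
 */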
275 static int
276 ctl_backend_ramdisk_submit(union ctl_io *io)
277 {
278 	struct ctl_be_lun *ctl_be_lun;
279 	struct ctl_lba_len_flags *lbalen;
280 
281 	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
282 		CTL_PRIV_BACKEND_LUN].ptr;
283 	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
284 	if (lbalen->flags & CTL_LLF_VERIFY) {
285 		ctl_set_success(&io->scsiio);
286 		ctl_data_submit_done(io);
287 		return (CTL_RETVAL_COMPLETE);
288 	}
289 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
290 	    lbalen->len * ctl_be_lun->blocksize;
291 	ctl_backend_ramdisk_continue(io);
292 	return (CTL_RETVAL_COMPLETE);
293 }
294 
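/*
 * Set up the next chunk of a transfer: point the I/O at the shared backing
 * store (building an S/G list of ramdisk pages when CTL_RAMDISK_PAGES is
 * defined), subtract the chunk from the residual count in CTL_PRIV_BACKEND
 * and hand the request to ctl_datamove().
 */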
295 static void
296 ctl_backend_ramdisk_continue(union ctl_io *io)
297 {
298 	struct ctl_be_ramdisk_softc *softc;
299 	int len, len_filled, sg_filled;
300 #ifdef CTL_RAMDISK_PAGES
301 	struct ctl_sg_entry *sg_entries;
302 	int i;
303 #endif
304 
305 	softc = &rd_softc;
306 	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
307 #ifdef CTL_RAMDISK_PAGES
308 	sg_filled = min(btoc(len), softc->num_pages);
309 	if (sg_filled > 1) {
310 		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
311 						  sg_filled, M_RAMDISK,
312 						  M_WAITOK);
313 		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
314 		for (i = 0, len_filled = 0; i < sg_filled; i++) {
315 			sg_entries[i].addr = softc->ramdisk_pages[i];
316 			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
317 			len_filled += sg_entries[i].len;
318 		}
319 		io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
320 	} else {
321 		sg_filled = 0;
322 		len_filled = len;
323 		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
324 	}
325 #else
326 	sg_filled = 0;
327 	len_filled = min(len, softc->rd_size);
328 	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
329 #endif /* CTL_RAMDISK_PAGES */
330 
331 	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
332 	io->scsiio.kern_data_resid = 0;
333 	io->scsiio.kern_data_len = len_filled;
334 	io->scsiio.kern_sg_entries = sg_filled;
335 	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
336 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
337 #ifdef CTL_TIME_IO
338 	getbintime(&io->io_hdr.dma_start_bt);
339 #endif
340 	ctl_datamove(io);
341 }
342 
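/*
 * Taskqueue handler: drain the LUN's continuation queue, restarting each
 * queued I/O with the queue lock dropped.
 */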
343 static void
344 ctl_backend_ramdisk_worker(void *context, int pending)
345 {
346 	struct ctl_be_ramdisk_softc *softc;
347 	struct ctl_be_ramdisk_lun *be_lun;
348 	union ctl_io *io;
349 
350 	be_lun = (struct ctl_be_ramdisk_lun *)context;
351 	softc = be_lun->softc;
352 
353 	mtx_lock(&be_lun->queue_lock);
354 	for (;;) {
355 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
356 		if (io != NULL) {
357 			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
358 				      ctl_io_hdr, links);
359 
360 			mtx_unlock(&be_lun->queue_lock);
361 
362 			ctl_backend_ramdisk_continue(io);
363 
364 			mtx_lock(&be_lun->queue_lock);
365 			continue;
366 		}
367 
368 		/*
369 		 * If we get here, there is no work left in the queues, so
370 		 * just break out and let the task queue go to sleep.
371 		 */
372 		break;
373 	}
374 	mtx_unlock(&be_lun->queue_lock);
375 }
376 
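/*
 * Handle CTL_LUN_REQ ioctls from userland (e.g. ctladm), dispatching LUN
 * create, remove and modify requests.
 */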
377 static int
378 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
379 			  int flag, struct thread *td)
380 {
381 	struct ctl_be_ramdisk_softc *softc;
382 	int retval;
383 
384 	retval = 0;
385 	softc = &rd_softc;
386 
387 	switch (cmd) {
388 	case CTL_LUN_REQ: {
389 		struct ctl_lun_req *lun_req;
390 
391 		lun_req = (struct ctl_lun_req *)addr;
392 
393 		switch (lun_req->reqtype) {
394 		case CTL_LUNREQ_CREATE:
395 			retval = ctl_backend_ramdisk_create(softc, lun_req,
396 							    /*do_wait*/ 1);
397 			break;
398 		case CTL_LUNREQ_RM:
399 			retval = ctl_backend_ramdisk_rm(softc, lun_req);
400 			break;
401 		case CTL_LUNREQ_MODIFY:
402 			retval = ctl_backend_ramdisk_modify(softc, lun_req);
403 			break;
404 		default:
405 			lun_req->status = CTL_LUN_ERROR;
406 			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
407 				 "%s: invalid LUN request type %d", __func__,
408 				 lun_req->reqtype);
409 			break;
410 		}
411 		break;
412 	}
413 	default:
414 		retval = ENOTTY;
415 		break;
416 	}
417 
418 	return (retval);
419 }
420 
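/*
 * Remove a LUN: disable and invalidate it, wait (interruptibly) for the
 * shutdown callback to report that CTL has released it, and then free the
 * taskqueue, options, lock and LUN structure.
 */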
421 static int
422 ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
423 		       struct ctl_lun_req *req)
424 {
425 	struct ctl_be_ramdisk_lun *be_lun;
426 	struct ctl_lun_rm_params *params;
427 	int retval;
428 
429 
430 	retval = 0;
431 	params = &req->reqdata.rm;
432 
433 	be_lun = NULL;
434 
435 	mtx_lock(&softc->lock);
436 
437 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
438 		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
439 			break;
440 	}
441 	mtx_unlock(&softc->lock);
442 
443 	if (be_lun == NULL) {
444 		snprintf(req->error_str, sizeof(req->error_str),
445 			 "%s: LUN %u is not managed by the ramdisk backend",
446 			 __func__, params->lun_id);
447 		goto bailout_error;
448 	}
449 
450 	retval = ctl_disable_lun(&be_lun->ctl_be_lun);
451 
452 	if (retval != 0) {
453 		snprintf(req->error_str, sizeof(req->error_str),
454 			 "%s: error %d returned from ctl_disable_lun() for "
455 			 "LUN %d", __func__, retval, params->lun_id);
456 		goto bailout_error;
457 	}
458 
459 	/*
460 	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
461 	 * routine can be called any time after we invalidate the LUN,
462 	 * and can be called from our context.
463 	 *
464 	 * This tells the shutdown routine that we're waiting, or we're
465 	 * going to wait for the shutdown to happen.
466 	 */
467 	mtx_lock(&softc->lock);
468 	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
469 	mtx_unlock(&softc->lock);
470 
471 	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
472 	if (retval != 0) {
473 		snprintf(req->error_str, sizeof(req->error_str),
474 			 "%s: error %d returned from ctl_invalidate_lun() for "
475 			 "LUN %d", __func__, retval, params->lun_id);
476 		mtx_lock(&softc->lock);
477 		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
478 		mtx_unlock(&softc->lock);
479 		goto bailout_error;
480 	}
481 
482 	mtx_lock(&softc->lock);
483 
484 	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
485 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
486 		if (retval == EINTR)
487 			break;
488 	}
489 	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
490 
491 	/*
492 	 * We only remove this LUN from the list and free it (below) if
493 	 * retval == 0.  If the user interrupted the wait, we just bail out
494 	 * without actually freeing the LUN.  We let the shutdown routine
495 	 * free the LUN if that happens.
496 	 */
497 	if (retval == 0) {
498 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
499 			      links);
500 		softc->num_luns--;
501 	}
502 
503 	mtx_unlock(&softc->lock);
504 
505 	if (retval == 0) {
506 		taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
507 		taskqueue_free(be_lun->io_taskqueue);
508 		ctl_free_opts(&be_lun->ctl_be_lun.options);
509 		mtx_destroy(&be_lun->queue_lock);
510 		free(be_lun, M_RAMDISK);
511 	}
512 
513 	req->status = CTL_LUN_OK;
514 
515 	return (retval);
516 
517 bailout_error:
518 	req->status = CTL_LUN_ERROR;
519 
520 	return (0);
521 }
522 
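/*
 * Create a new LUN: allocate and fill in the per-LUN structure, set up its
 * continuation queue and taskqueue, register it with ctl_add_lun() and,
 * unless do_wait is zero, sleep until the config_status callback reports
 * the result.
 */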
523 static int
524 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
525 			   struct ctl_lun_req *req, int do_wait)
526 {
527 	struct ctl_be_ramdisk_lun *be_lun;
528 	struct ctl_lun_create_params *params;
529 	uint32_t blocksize;
530 	char *value;
531 	char tmpstr[32];
532 	int retval, unmap;
533 
534 	retval = 0;
535 	params = &req->reqdata.create;
536 	if (params->blocksize_bytes != 0)
537 		blocksize = params->blocksize_bytes;
538 	else
539 		blocksize = 512;
540 
541 	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
542 			M_WAITOK : M_NOWAIT));
543 
544 	if (be_lun == NULL) {
545 		snprintf(req->error_str, sizeof(req->error_str),
546 			 "%s: error allocating %zd bytes", __func__,
547 			 sizeof(*be_lun));
548 		goto bailout_error;
549 	}
550 	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
551 	ctl_init_opts(&be_lun->ctl_be_lun.options,
552 	    req->num_be_args, req->kern_be_args);
553 
554 	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
555 		be_lun->ctl_be_lun.lun_type = params->device_type;
556 	else
557 		be_lun->ctl_be_lun.lun_type = T_DIRECT;
558 
559 	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
560 
561 		if (params->lun_size_bytes < blocksize) {
562 			snprintf(req->error_str, sizeof(req->error_str),
563 				 "%s: LUN size %ju < blocksize %u", __func__,
564 				 params->lun_size_bytes, blocksize);
565 			goto bailout_error;
566 		}
567 
568 		be_lun->size_blocks = params->lun_size_bytes / blocksize;
569 		be_lun->size_bytes = be_lun->size_blocks * blocksize;
570 
571 		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
572 		be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
573 		be_lun->ctl_be_lun.opttxferlen = softc->rd_size / blocksize;
574 	} else {
575 		be_lun->ctl_be_lun.maxlba = 0;
576 		blocksize = 0;
577 		be_lun->size_bytes = 0;
578 		be_lun->size_blocks = 0;
579 	}
580 
581 	be_lun->ctl_be_lun.blocksize = blocksize;
582 
583 	/* Tell the user the blocksize we ended up using */
584 	params->blocksize_bytes = blocksize;
585 
586 	/* Tell the user the exact size we ended up using */
587 	params->lun_size_bytes = be_lun->size_bytes;
588 
589 	be_lun->softc = softc;
590 
591 	unmap = 1;
592 	value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
593 	if (value != NULL)
594 		unmap = (strcmp(value, "on") == 0);
595 
596 	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
597 	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
598 	if (unmap)
599 		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
600 	be_lun->ctl_be_lun.be_lun = be_lun;
601 
602 	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
603 		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
604 		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
605 	} else
606 		be_lun->ctl_be_lun.req_lun_id = 0;
607 
608 	be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
609 	be_lun->ctl_be_lun.lun_config_status =
610 		ctl_backend_ramdisk_lun_config_status;
611 	be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
612 	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
613 		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
614 			 softc->num_luns);
615 		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
616 			MIN(sizeof(be_lun->ctl_be_lun.serial_num),
617 			    sizeof(tmpstr)));
618 
619 		/* Tell the user what we used for a serial number */
620 		strncpy((char *)params->serial_num, tmpstr,
621 			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
622 	} else {
623 		strncpy((char *)be_lun->ctl_be_lun.serial_num,
624 			params->serial_num,
625 			MIN(sizeof(be_lun->ctl_be_lun.serial_num),
626 			    sizeof(params->serial_num)));
627 	}
628 	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
629 		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
630 		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
631 			MIN(sizeof(be_lun->ctl_be_lun.device_id),
632 			    sizeof(tmpstr)));
633 
634 		/* Tell the user what we used for a device ID */
635 		strncpy((char *)params->device_id, tmpstr,
636 			MIN(sizeof(params->device_id), sizeof(tmpstr)));
637 	} else {
638 		strncpy((char *)be_lun->ctl_be_lun.device_id,
639 			params->device_id,
640 			MIN(sizeof(be_lun->ctl_be_lun.device_id),
641 			    sizeof(params->device_id)));
642 	}
643 
644 	STAILQ_INIT(&be_lun->cont_queue);
645 	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
646 	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
647 	    be_lun);
648 
649 	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
650 	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
651 	if (be_lun->io_taskqueue == NULL) {
652 		snprintf(req->error_str, sizeof(req->error_str),
653 			 "%s: Unable to create taskqueue", __func__);
654 		goto bailout_error;
655 	}
656 
657 	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
658 					 /*num threads*/1,
659 					 /*priority*/PWAIT,
660 					 /*thread name*/
661 					 "%s taskq", be_lun->lunname);
662 	if (retval != 0)
663 		goto bailout_error;
664 
665 	mtx_lock(&softc->lock);
666 	softc->num_luns++;
667 	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
668 
669 	mtx_unlock(&softc->lock);
670 
671 	retval = ctl_add_lun(&be_lun->ctl_be_lun);
672 	if (retval != 0) {
673 		mtx_lock(&softc->lock);
674 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
675 			      links);
676 		softc->num_luns--;
677 		mtx_unlock(&softc->lock);
678 		snprintf(req->error_str, sizeof(req->error_str),
679 			 "%s: ctl_add_lun() returned error %d, see dmesg for "
680 			"details", __func__, retval);
681 		retval = 0;
682 		goto bailout_error;
683 	}
684 
685 	if (do_wait == 0)
686 		return (retval);
687 
688 	mtx_lock(&softc->lock);
689 
690 	/*
691 	 * Tell the config_status routine that we're waiting so it won't
692 	 * clean up the LUN in the event of an error.
693 	 */
694 	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
695 
696 	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
697 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
698 		if (retval == EINTR)
699 			break;
700 	}
701 	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
702 
703 	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
704 		snprintf(req->error_str, sizeof(req->error_str),
705 			 "%s: LUN configuration error, see dmesg for details",
706 			 __func__);
707 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
708 			      links);
709 		softc->num_luns--;
710 		mtx_unlock(&softc->lock);
711 		goto bailout_error;
712 	} else {
713 		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
714 	}
715 	mtx_unlock(&softc->lock);
716 
717 	req->status = CTL_LUN_OK;
718 
719 	return (retval);
720 
721 bailout_error:
722 	req->status = CTL_LUN_ERROR;
723 	if (be_lun != NULL) {
724 		if (be_lun->io_taskqueue != NULL) {
725 			taskqueue_free(be_lun->io_taskqueue);
726 		}
727 		ctl_free_opts(&be_lun->ctl_be_lun.options);
728 		mtx_destroy(&be_lun->queue_lock);
729 		free(be_lun, M_RAMDISK);
730 	}
731 
732 	return (retval);
733 }
734 
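/*
 * Resize an existing LUN: recompute the block and byte sizes from the
 * requested size and tell CTL about the new capacity.
 */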
735 static int
736 ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
737 		       struct ctl_lun_req *req)
738 {
739 	struct ctl_be_ramdisk_lun *be_lun;
740 	struct ctl_lun_modify_params *params;
741 	uint32_t blocksize;
742 
743 	params = &req->reqdata.modify;
744 
745 	be_lun = NULL;
746 
747 	mtx_lock(&softc->lock);
748 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
749 		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
750 			break;
751 	}
752 	mtx_unlock(&softc->lock);
753 
754 	if (be_lun == NULL) {
755 		snprintf(req->error_str, sizeof(req->error_str),
756 			 "%s: LUN %u is not managed by the ramdisk backend",
757 			 __func__, params->lun_id);
758 		goto bailout_error;
759 	}
760 
761 	if (params->lun_size_bytes == 0) {
762 		snprintf(req->error_str, sizeof(req->error_str),
763 			"%s: LUN size \"auto\" not supported "
764 			"by the ramdisk backend", __func__);
765 		goto bailout_error;
766 	}
767 
768 	blocksize = be_lun->ctl_be_lun.blocksize;
769 
770 	if (params->lun_size_bytes < blocksize) {
771 		snprintf(req->error_str, sizeof(req->error_str),
772 			"%s: LUN size %ju < blocksize %u", __func__,
773 			params->lun_size_bytes, blocksize);
774 		goto bailout_error;
775 	}
776 
777 	be_lun->size_blocks = params->lun_size_bytes / blocksize;
778 	be_lun->size_bytes = be_lun->size_blocks * blocksize;
779 
780 	/*
781 	 * The maximum LBA is the size - 1.
782 	 *
783 	 * XXX: Note that this field is being updated without locking,
784 	 * 	which might cause problems on 32-bit architectures.
785 	 */
786 	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
787 	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
788 
789 	/* Tell the user the exact size we ended up using */
790 	params->lun_size_bytes = be_lun->size_bytes;
791 
792 	req->status = CTL_LUN_OK;
793 
794 	return (0);
795 
796 bailout_error:
797 	req->status = CTL_LUN_ERROR;
798 
799 	return (0);
800 }
801 
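/*
 * Called by CTL once an invalidated LUN has been released.  If someone is
 * waiting on the LUN (create or remove), wake them up and let them do the
 * cleanup; otherwise unlink and free the LUN here.
 */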
802 static void
803 ctl_backend_ramdisk_lun_shutdown(void *be_lun)
804 {
805 	struct ctl_be_ramdisk_lun *lun;
806 	struct ctl_be_ramdisk_softc *softc;
807 	int do_free;
808 
809 	lun = (struct ctl_be_ramdisk_lun *)be_lun;
810 	softc = lun->softc;
811 	do_free = 0;
812 
813 	mtx_lock(&softc->lock);
814 
815 	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
816 
817 	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
818 		wakeup(lun);
819 	} else {
820 		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
821 			      links);
822 		softc->num_luns--;
823 		do_free = 1;
824 	}
825 
826 	mtx_unlock(&softc->lock);
827 
828 	if (do_free != 0)
829 		free(be_lun, M_RAMDISK);
830 }
831 
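/*
 * Called by CTL with the result of ctl_add_lun().  On success, clear the
 * unconfigured flag, wake any waiter and enable the LUN.  On failure,
 * either flag the error for a waiting creator or clean the LUN up here.
 */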
832 static void
833 ctl_backend_ramdisk_lun_config_status(void *be_lun,
834 				      ctl_lun_config_status status)
835 {
836 	struct ctl_be_ramdisk_lun *lun;
837 	struct ctl_be_ramdisk_softc *softc;
838 
839 	lun = (struct ctl_be_ramdisk_lun *)be_lun;
840 	softc = lun->softc;
841 
842 	if (status == CTL_LUN_CONFIG_OK) {
843 		mtx_lock(&softc->lock);
844 		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
845 		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
846 			wakeup(lun);
847 		mtx_unlock(&softc->lock);
848 
849 		/*
850 		 * We successfully added the LUN, attempt to enable it.
851 		 */
852 		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
853 			printf("%s: ctl_enable_lun() failed!\n", __func__);
854 			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
855 				printf("%s: ctl_invalidate_lun() failed!\n",
856 				       __func__);
857 			}
858 		}
859 
860 		return;
861 	}
862 
863 
864 	mtx_lock(&softc->lock);
865 	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
866 
867 	/*
868 	 * If we have a user waiting, let him handle the cleanup.  If not,
869 	 * clean things up here.
870 	 */
871 	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
872 		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
873 		wakeup(lun);
874 	} else {
875 		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
876 			      links);
877 		softc->num_luns--;
878 		free(lun, M_RAMDISK);
879 	}
880 	mtx_unlock(&softc->lock);
881 }
882 
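/*
 * Handle configuration (non data-transfer) writes.  SYNCHRONIZE CACHE,
 * WRITE SAME and UNMAP are no-ops for a ramdisk; START STOP UNIT just
 * toggles the LUN state.
 */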
883 static int
884 ctl_backend_ramdisk_config_write(union ctl_io *io)
885 {
886 	struct ctl_be_ramdisk_softc *softc;
887 	int retval;
888 
889 	retval = 0;
890 	softc = &rd_softc;
891 
892 	switch (io->scsiio.cdb[0]) {
893 	case SYNCHRONIZE_CACHE:
894 	case SYNCHRONIZE_CACHE_16:
895 		/*
896 		 * The upper level CTL code will filter out any CDBs with
897 		 * the immediate bit set and return the proper error.  It
898 		 * will also not allow a sync cache command to go to a LUN
899 		 * that is powered down.
900 		 *
901 		 * We don't really need to worry about what LBA range the
902 		 * user asked to be synced out.  When they issue a sync
903 		 * cache command, we'll sync out the whole thing.
904 		 *
905 		 * This is obviously just a stubbed out implementation.
906 		 * The real implementation will be in the RAIDCore/CTL
907 		 * interface, and can only really happen when RAIDCore
908 		 * implements a per-array cache sync.
909 		 */
910 		ctl_set_success(&io->scsiio);
911 		ctl_config_write_done(io);
912 		break;
913 	case START_STOP_UNIT: {
914 		struct scsi_start_stop_unit *cdb;
915 		struct ctl_be_lun *ctl_be_lun;
916 		struct ctl_be_ramdisk_lun *be_lun;
917 
918 		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
919 
920 		ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
921 			CTL_PRIV_BACKEND_LUN].ptr;
922 		be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
923 
924 		if (cdb->how & SSS_START)
925 			retval = ctl_start_lun(ctl_be_lun);
926 		else {
927 			retval = ctl_stop_lun(ctl_be_lun);
928 #ifdef NEEDTOPORT
929 			if ((retval == 0)
930 			 && (cdb->byte2 & SSS_ONOFFLINE))
931 				retval = ctl_lun_offline(ctl_be_lun);
932 #endif
933 		}
934 
935 		/*
936 		 * In general, the above routines should not fail.  They
937 		 * just set state for the LUN.  So we've got something
938 		 * pretty wrong here if we can't start or stop the LUN.
939 		 */
940 		if (retval != 0) {
941 			ctl_set_internal_failure(&io->scsiio,
942 						 /*sks_valid*/ 1,
943 						 /*retry_count*/ 0xf051);
944 			retval = CTL_RETVAL_COMPLETE;
945 		} else {
946 			ctl_set_success(&io->scsiio);
947 		}
948 		ctl_config_write_done(io);
949 		break;
950 	}
951 	case WRITE_SAME_10:
952 	case WRITE_SAME_16:
953 	case UNMAP:
954 		ctl_set_success(&io->scsiio);
955 		ctl_config_write_done(io);
956 		break;
957 	default:
958 		ctl_set_invalid_opcode(&io->scsiio);
959 		ctl_config_write_done(io);
960 		retval = CTL_RETVAL_COMPLETE;
961 		break;
962 	}
963 
964 	return (retval);
965 }
966 
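/*
 * Handle configuration reads.  Only SERVICE ACTION IN (GET LBA STATUS) is
 * recognized, and the default data prepared by CTL is returned unchanged;
 * anything else is rejected as an invalid opcode.
 */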
967 static int
968 ctl_backend_ramdisk_config_read(union ctl_io *io)
969 {
970 	int retval = 0;
971 
972 	switch (io->scsiio.cdb[0]) {
973 	case SERVICE_ACTION_IN:
974 		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
975 			/* We have nothing to tell, leave default data. */
976 			ctl_config_read_done(io);
977 			retval = CTL_RETVAL_COMPLETE;
978 			break;
979 		}
980 		ctl_set_invalid_field(&io->scsiio,
981 				      /*sks_valid*/ 1,
982 				      /*command*/ 1,
983 				      /*field*/ 1,
984 				      /*bit_valid*/ 1,
985 				      /*bit*/ 4);
986 		ctl_config_read_done(io);
987 		retval = CTL_RETVAL_COMPLETE;
988 		break;
989 	default:
990 		ctl_set_invalid_opcode(&io->scsiio);
991 		ctl_config_read_done(io);
992 		retval = CTL_RETVAL_COMPLETE;
993 		break;
994 	}
995 
996 	return (retval);
997 }
998