xref: /freebsd/sys/cam/ctl/ctl_backend_ramdisk.c (revision 60b9567d16b585b05c86c60393958ad81cbfa72f)
1 /*-
2  * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3  * Copyright (c) 2012 The FreeBSD Foundation
4  * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Portions of this software were developed by Edward Tomasz Napierala
8  * under sponsorship from the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification.
16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17  *    substantially similar to the "NO WARRANTY" disclaimer below
18  *    ("Disclaimer") and any redistribution must be conditioned upon
19  *    including a substantially similar Disclaimer requirement for further
20  *    binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGES.
34  *
35  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
36  */
37 /*
38  * CAM Target Layer backend for a "fake" ramdisk.
39  *
40  * Author: Ken Merry <ken@FreeBSD.org>
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/condvar.h>
50 #include <sys/types.h>
51 #include <sys/lock.h>
52 #include <sys/mutex.h>
53 #include <sys/malloc.h>
54 #include <sys/taskqueue.h>
55 #include <sys/time.h>
56 #include <sys/queue.h>
57 #include <sys/conf.h>
58 #include <sys/ioccom.h>
59 #include <sys/module.h>
60 #include <sys/sysctl.h>
61 
62 #include <cam/scsi/scsi_all.h>
63 #include <cam/scsi/scsi_da.h>
64 #include <cam/ctl/ctl_io.h>
65 #include <cam/ctl/ctl.h>
66 #include <cam/ctl/ctl_util.h>
67 #include <cam/ctl/ctl_backend.h>
68 #include <cam/ctl/ctl_debug.h>
69 #include <cam/ctl/ctl_ioctl.h>
70 #include <cam/ctl/ctl_ha.h>
71 #include <cam/ctl/ctl_private.h>
72 #include <cam/ctl/ctl_error.h>
73 
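/*
 * Per-LUN state flags, protected by the softc lock.  UNCONFIGURED is set
 * until CTL reports the LUN configured (and set again once it shuts down),
 * CONFIG_ERR records a failed configuration, and WAITING marks a thread
 * sleeping on the LUN until configuration or shutdown completes.
 */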
74 typedef enum {
75 	CTL_BE_RAMDISK_LUN_UNCONFIGURED	= 0x01,
76 	CTL_BE_RAMDISK_LUN_CONFIG_ERR	= 0x02,
77 	CTL_BE_RAMDISK_LUN_WAITING	= 0x04
78 } ctl_be_ramdisk_lun_flags;
79 
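/*
 * Per-LUN bookkeeping: the creation parameters, the computed size, the
 * continuation queue and task queue used to restart multi-chunk transfers,
 * and the ctl_be_lun registered with the CTL midlayer.
 */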
80 struct ctl_be_ramdisk_lun {
81 	struct ctl_lun_create_params params;
82 	char lunname[32];
83 	uint64_t size_bytes;
84 	uint64_t size_blocks;
85 	struct ctl_be_ramdisk_softc *softc;
86 	ctl_be_ramdisk_lun_flags flags;
87 	STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
88 	struct ctl_be_lun cbe_lun;
89 	struct taskqueue *io_taskqueue;
90 	struct task io_task;
91 	STAILQ_HEAD(, ctl_io_hdr) cont_queue;
92 	struct mtx_padalign queue_lock;
93 };
94 
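/*
 * Global softc shared by every ramdisk LUN: the scratch backing store
 * (a single buffer, or an array of pages when CTL_RAMDISK_PAGES is
 * defined) and the list of configured LUNs.
 */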
95 struct ctl_be_ramdisk_softc {
96 	struct mtx lock;
97 	int rd_size;
98 #ifdef CTL_RAMDISK_PAGES
99 	uint8_t **ramdisk_pages;
100 	int num_pages;
101 #else
102 	uint8_t *ramdisk_buffer;
103 #endif
104 	int num_luns;
105 	STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
106 };
107 
108 static struct ctl_be_ramdisk_softc rd_softc;
109 extern struct ctl_softc *control_softc;
110 
111 static int ctl_backend_ramdisk_init(void);
112 static int ctl_backend_ramdisk_shutdown(void);
113 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
114 static int ctl_backend_ramdisk_submit(union ctl_io *io);
115 static void ctl_backend_ramdisk_continue(union ctl_io *io);
116 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
117 				     caddr_t addr, int flag, struct thread *td);
118 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
119 				  struct ctl_lun_req *req);
120 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
121 				      struct ctl_lun_req *req);
122 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
123 				  struct ctl_lun_req *req);
124 static void ctl_backend_ramdisk_worker(void *context, int pending);
125 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
126 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
127 						  ctl_lun_config_status status);
128 static int ctl_backend_ramdisk_config_write(union ctl_io *io);
129 static int ctl_backend_ramdisk_config_read(union ctl_io *io);
130 
131 static struct ctl_backend_driver ctl_be_ramdisk_driver =
132 {
133 	.name = "ramdisk",
134 	.flags = CTL_BE_FLAG_HAS_CONFIG,
135 	.init = ctl_backend_ramdisk_init,
136 	.shutdown = ctl_backend_ramdisk_shutdown,
137 	.data_submit = ctl_backend_ramdisk_submit,
138 	.data_move_done = ctl_backend_ramdisk_move_done,
139 	.config_read = ctl_backend_ramdisk_config_read,
140 	.config_write = ctl_backend_ramdisk_config_write,
141 	.ioctl = ctl_backend_ramdisk_ioctl
142 };
143 
144 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
145 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
146 
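/*
 * Backend init: set up the softc lock and LUN list and allocate the shared
 * scratch memory that all ramdisk LUNs read from and write to.
 */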
147 static int
148 ctl_backend_ramdisk_init(void)
149 {
150 	struct ctl_be_ramdisk_softc *softc = &rd_softc;
151 #ifdef CTL_RAMDISK_PAGES
152 	int i;
153 #endif
154 
155 	memset(softc, 0, sizeof(*softc));
156 	mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
157 	STAILQ_INIT(&softc->lun_list);
158 	softc->rd_size = 1024 * 1024;
159 #ifdef CTL_RAMDISK_PAGES
160 	softc->num_pages = softc->rd_size / PAGE_SIZE;
161 	softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
162 						  softc->num_pages, M_RAMDISK,
163 						  M_WAITOK);
164 	for (i = 0; i < softc->num_pages; i++)
165 		softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
166 #else
167 	softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
168 						  M_WAITOK);
169 #endif
170 
171 	return (0);
172 }
173 
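/*
 * Backend shutdown: disable and invalidate any remaining LUNs, then free
 * the shared backing store and destroy the softc lock.
 */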
174 static int
175 ctl_backend_ramdisk_shutdown(void)
176 {
177 	struct ctl_be_ramdisk_softc *softc = &rd_softc;
178 	struct ctl_be_ramdisk_lun *lun, *next_lun;
179 #ifdef CTL_RAMDISK_PAGES
180 	int i;
181 #endif
182 
183 	mtx_lock(&softc->lock);
184 	STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
185 		/*
186 		 * Drop our lock here.  Since ctl_invalidate_lun() can call
187 		 * back into us, this could potentially lead to a recursive
188 		 * lock of the same mutex, which would cause a hang.
189 		 */
190 		mtx_unlock(&softc->lock);
191 		ctl_disable_lun(&lun->cbe_lun);
192 		ctl_invalidate_lun(&lun->cbe_lun);
193 		mtx_lock(&softc->lock);
194 	}
195 	mtx_unlock(&softc->lock);
196 
197 #ifdef CTL_RAMDISK_PAGES
198 	for (i = 0; i < softc->num_pages; i++)
199 		free(softc->ramdisk_pages[i], M_RAMDISK);
200 	free(softc->ramdisk_pages, M_RAMDISK);
201 #else
202 	free(softc->ramdisk_buffer, M_RAMDISK);
203 #endif
204 	mtx_destroy(&softc->lock);
205 	return (0);
206 }
207 
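/*
 * DMA completion callback.  Accounts for the transfer, frees any S/G list
 * allocated by ctl_backend_ramdisk_continue(), checks for aborts and port
 * errors, and either queues the I/O for another chunk on the LUN's task
 * queue or completes it.
 */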
208 static int
209 ctl_backend_ramdisk_move_done(union ctl_io *io)
210 {
211 	struct ctl_be_lun *cbe_lun;
212 	struct ctl_be_ramdisk_lun *be_lun;
213 #ifdef CTL_TIME_IO
214 	struct bintime cur_bt;
215 #endif
216 
217 	CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
218 	cbe_lun = CTL_BACKEND_LUN(io);
219 	be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
220 #ifdef CTL_TIME_IO
221 	getbinuptime(&cur_bt);
222 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
223 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
224 #endif
225 	io->io_hdr.num_dmas++;
226 	if (io->scsiio.kern_sg_entries > 0)
227 		free(io->scsiio.kern_data_ptr, M_RAMDISK);
228 	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
229 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
230 		;
231 	} else if (io->io_hdr.port_status != 0 &&
232 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
233 	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
234 		ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
235 		    /*retry_count*/ io->io_hdr.port_status);
236 	} else if (io->scsiio.kern_data_resid != 0 &&
237 	    (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
238 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
239 	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
240 		ctl_set_invalid_field_ciu(&io->scsiio);
241 	} else if ((io->io_hdr.port_status == 0) &&
242 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
243 		if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
244 			mtx_lock(&be_lun->queue_lock);
245 			STAILQ_INSERT_TAIL(&be_lun->cont_queue,
246 			    &io->io_hdr, links);
247 			mtx_unlock(&be_lun->queue_lock);
248 			taskqueue_enqueue(be_lun->io_taskqueue,
249 			    &be_lun->io_task);
250 			return (0);
251 		}
252 		ctl_set_success(&io->scsiio);
253 	}
254 	ctl_data_submit_done(io);
255 	return (0);
256 }
257 
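/*
 * Entry point for data I/O.  VERIFY commands complete immediately since
 * there is no real medium to check; for reads and writes the total
 * transfer length is recorded and the first chunk is started.
 */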
258 static int
259 ctl_backend_ramdisk_submit(union ctl_io *io)
260 {
261 	struct ctl_be_lun *cbe_lun;
262 	struct ctl_lba_len_flags *lbalen;
263 
264 	cbe_lun = CTL_BACKEND_LUN(io);
265 	lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
266 	if (lbalen->flags & CTL_LLF_VERIFY) {
267 		ctl_set_success(&io->scsiio);
268 		ctl_data_submit_done(io);
269 		return (CTL_RETVAL_COMPLETE);
270 	}
271 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
272 	    lbalen->len * cbe_lun->blocksize;
273 	ctl_backend_ramdisk_continue(io);
274 	return (CTL_RETVAL_COMPLETE);
275 }
276 
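/*
 * Set up one chunk of a transfer: point the I/O at the shared ramdisk
 * memory, either directly or through an S/G list of pages, and hand it
 * to ctl_datamove().
 */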
277 static void
278 ctl_backend_ramdisk_continue(union ctl_io *io)
279 {
280 	struct ctl_be_ramdisk_softc *softc;
281 	int len, len_filled, sg_filled;
282 #ifdef CTL_RAMDISK_PAGES
283 	struct ctl_sg_entry *sg_entries;
284 	int i;
285 #endif
286 
287 	softc = &rd_softc;
288 	len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
289 #ifdef CTL_RAMDISK_PAGES
290 	sg_filled = min(btoc(len), softc->num_pages);
291 	if (sg_filled > 1) {
292 		io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
293 						  sg_filled, M_RAMDISK,
294 						  M_WAITOK);
295 		sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
296 		for (i = 0, len_filled = 0; i < sg_filled; i++) {
297 			sg_entries[i].addr = softc->ramdisk_pages[i];
298 			sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
299 			len_filled += sg_entries[i].len;
300 		}
301 	} else {
302 		sg_filled = 0;
303 		len_filled = len;
304 		io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
305 	}
306 #else
307 	sg_filled = 0;
308 	len_filled = min(len, softc->rd_size);
309 	io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
310 #endif /* CTL_RAMDISK_PAGES */
311 
312 	io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
313 	io->scsiio.kern_data_len = len_filled;
314 	io->scsiio.kern_sg_entries = sg_filled;
315 	io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
316 	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
317 #ifdef CTL_TIME_IO
318 	getbinuptime(&io->io_hdr.dma_start_bt);
319 #endif
320 	ctl_datamove(io);
321 }
322 
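/*
 * Task queue worker: drain the LUN's continuation queue, restarting each
 * queued I/O via ctl_backend_ramdisk_continue().
 */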
323 static void
324 ctl_backend_ramdisk_worker(void *context, int pending)
325 {
326 	struct ctl_be_ramdisk_lun *be_lun;
327 	union ctl_io *io;
328 
329 	be_lun = (struct ctl_be_ramdisk_lun *)context;
330 
331 	mtx_lock(&be_lun->queue_lock);
332 	for (;;) {
333 		io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
334 		if (io != NULL) {
335 			STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
336 				      ctl_io_hdr, links);
337 			mtx_unlock(&be_lun->queue_lock);
338 			ctl_backend_ramdisk_continue(io);
339 			mtx_lock(&be_lun->queue_lock);
340 			continue;
341 		}
342 
343 		/*
344 		 * If we get here, there is no work left in the queues, so
345 		 * just break out and let the task queue go to sleep.
346 		 */
347 		break;
348 	}
349 	mtx_unlock(&be_lun->queue_lock);
350 }
351 
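/*
 * Character device ioctl handler; dispatches CTL_LUN_REQ create, remove
 * and modify requests to the routines below.
 */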
352 static int
353 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
354 			  int flag, struct thread *td)
355 {
356 	struct ctl_be_ramdisk_softc *softc = &rd_softc;
357 	struct ctl_lun_req *lun_req;
358 	int retval;
359 
360 	retval = 0;
361 	switch (cmd) {
362 	case CTL_LUN_REQ:
363 		lun_req = (struct ctl_lun_req *)addr;
364 		switch (lun_req->reqtype) {
365 		case CTL_LUNREQ_CREATE:
366 			retval = ctl_backend_ramdisk_create(softc, lun_req);
367 			break;
368 		case CTL_LUNREQ_RM:
369 			retval = ctl_backend_ramdisk_rm(softc, lun_req);
370 			break;
371 		case CTL_LUNREQ_MODIFY:
372 			retval = ctl_backend_ramdisk_modify(softc, lun_req);
373 			break;
374 		default:
375 			lun_req->status = CTL_LUN_ERROR;
376 			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
377 				 "%s: invalid LUN request type %d", __func__,
378 				 lun_req->reqtype);
379 			break;
380 		}
381 		break;
382 	default:
383 		retval = ENOTTY;
384 		break;
385 	}
386 
387 	return (retval);
388 }
389 
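/*
 * Remove a LUN: disable and invalidate it, wait for the shutdown callback
 * to report it unconfigured, then tear down its task queue and free it.
 */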
390 static int
391 ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
392 		       struct ctl_lun_req *req)
393 {
394 	struct ctl_be_ramdisk_lun *be_lun;
395 	struct ctl_lun_rm_params *params;
396 	int retval;
397 
398 	params = &req->reqdata.rm;
399 	mtx_lock(&softc->lock);
400 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
401 		if (be_lun->cbe_lun.lun_id == params->lun_id)
402 			break;
403 	}
404 	mtx_unlock(&softc->lock);
405 	if (be_lun == NULL) {
406 		snprintf(req->error_str, sizeof(req->error_str),
407 			 "%s: LUN %u is not managed by the ramdisk backend",
408 			 __func__, params->lun_id);
409 		goto bailout_error;
410 	}
411 
412 	retval = ctl_disable_lun(&be_lun->cbe_lun);
413 	if (retval != 0) {
414 		snprintf(req->error_str, sizeof(req->error_str),
415 			 "%s: error %d returned from ctl_disable_lun() for "
416 			 "LUN %d", __func__, retval, params->lun_id);
417 		goto bailout_error;
418 	}
419 
420 	/*
421 	 * Set the waiting flag before we invalidate the LUN.  Our shutdown
422 	 * routine can be called any time after we invalidate the LUN,
423 	 * and can be called from our context.
424 	 *
425 	 * This tells the shutdown routine that we are (or soon will be)
426 	 * waiting for the shutdown to complete, so it must not free the LUN.
427 	 */
428 	mtx_lock(&softc->lock);
429 	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
430 	mtx_unlock(&softc->lock);
431 
432 	retval = ctl_invalidate_lun(&be_lun->cbe_lun);
433 	if (retval != 0) {
434 		snprintf(req->error_str, sizeof(req->error_str),
435 			 "%s: error %d returned from ctl_invalidate_lun() for "
436 			 "LUN %d", __func__, retval, params->lun_id);
437 		mtx_lock(&softc->lock);
438 		be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
439 		mtx_unlock(&softc->lock);
440 		goto bailout_error;
441 	}
442 
443 	mtx_lock(&softc->lock);
444 	while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
445 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
446 		if (retval == EINTR)
447 			break;
448 	}
449 	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
450 
451 	/*
452 	 * We only remove this LUN from the list and free it (below) if
453 	 * retval == 0.  If the user interrupted the wait, we just bail out
454 	 * without actually freeing the LUN.  We let the shutdown routine
455 	 * free the LUN if that happens.
456 	 */
457 	if (retval == 0) {
458 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
459 			      links);
460 		softc->num_luns--;
461 	}
462 
463 	mtx_unlock(&softc->lock);
464 
465 	if (retval == 0) {
466 		taskqueue_drain_all(be_lun->io_taskqueue);
467 		taskqueue_free(be_lun->io_taskqueue);
468 		ctl_free_opts(&be_lun->cbe_lun.options);
469 		mtx_destroy(&be_lun->queue_lock);
470 		free(be_lun, M_RAMDISK);
471 	}
472 
473 	req->status = CTL_LUN_OK;
474 	return (retval);
475 
476 bailout_error:
477 	req->status = CTL_LUN_ERROR;
478 	return (0);
479 }
480 
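/*
 * Create a new ramdisk-backed LUN from the user-supplied parameters,
 * set up its task queue, register it with CTL and wait for configuration
 * to complete.
 */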
481 static int
482 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
483 			   struct ctl_lun_req *req)
484 {
485 	struct ctl_be_ramdisk_lun *be_lun;
486 	struct ctl_be_lun *cbe_lun;
487 	struct ctl_lun_create_params *params;
488 	char *value;
489 	char tmpstr[32];
490 	int retval;
491 
492 	retval = 0;
493 	params = &req->reqdata.create;
494 
495 	be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
496 	cbe_lun = &be_lun->cbe_lun;
497 	cbe_lun->be_lun = be_lun;
498 	be_lun->params = req->reqdata.create;
499 	be_lun->softc = softc;
500 	sprintf(be_lun->lunname, "cram%d", softc->num_luns);
501 	ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
502 
503 	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
504 		cbe_lun->lun_type = params->device_type;
505 	else
506 		cbe_lun->lun_type = T_DIRECT;
507 	be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
508 	cbe_lun->flags = 0;
509 	value = ctl_get_opt(&cbe_lun->options, "ha_role");
510 	if (value != NULL) {
511 		if (strcmp(value, "primary") == 0)
512 			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
513 	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
514 		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
515 
516 	if (cbe_lun->lun_type == T_DIRECT ||
517 	    cbe_lun->lun_type == T_CDROM) {
518 		if (params->blocksize_bytes != 0)
519 			cbe_lun->blocksize = params->blocksize_bytes;
520 		else if (cbe_lun->lun_type == T_CDROM)
521 			cbe_lun->blocksize = 2048;
522 		else
523 			cbe_lun->blocksize = 512;
524 		if (params->lun_size_bytes < cbe_lun->blocksize) {
525 			snprintf(req->error_str, sizeof(req->error_str),
526 				 "%s: LUN size %ju < blocksize %u", __func__,
527 				 params->lun_size_bytes, cbe_lun->blocksize);
528 			goto bailout_error;
529 		}
530 		be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
531 		be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
532 		cbe_lun->maxlba = be_lun->size_blocks - 1;
533 		cbe_lun->atomicblock = UINT32_MAX;
534 		cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
535 	}
536 
537 	/* Tell the user the blocksize we ended up using */
538 	params->blocksize_bytes = cbe_lun->blocksize;
539 	params->lun_size_bytes = be_lun->size_bytes;
540 
541 	value = ctl_get_opt(&cbe_lun->options, "unmap");
542 	if (value != NULL && strcmp(value, "on") == 0)
543 		cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
544 	value = ctl_get_opt(&cbe_lun->options, "readonly");
545 	if (value != NULL) {
546 		if (strcmp(value, "on") == 0)
547 			cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
548 	} else if (cbe_lun->lun_type != T_DIRECT)
549 		cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
550 	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
551 	value = ctl_get_opt(&cbe_lun->options, "serseq");
552 	if (value != NULL && strcmp(value, "on") == 0)
553 		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
554 	else if (value != NULL && strcmp(value, "read") == 0)
555 		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
556 	else if (value != NULL && strcmp(value, "off") == 0)
557 		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
558 
559 	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
560 		cbe_lun->req_lun_id = params->req_lun_id;
561 		cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
562 	} else
563 		cbe_lun->req_lun_id = 0;
564 
565 	cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
566 	cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
567 	cbe_lun->be = &ctl_be_ramdisk_driver;
568 	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
569 		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d",
570 			 softc->num_luns);
571 		strncpy((char *)cbe_lun->serial_num, tmpstr,
572 			MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
573 
574 		/* Tell the user what we used for a serial number */
575 		strncpy((char *)params->serial_num, tmpstr,
576 			MIN(sizeof(params->serial_num), sizeof(tmpstr)));
577 	} else {
578 		strncpy((char *)cbe_lun->serial_num, params->serial_num,
579 			MIN(sizeof(cbe_lun->serial_num),
580 			    sizeof(params->serial_num)));
581 	}
582 	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
583 		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
584 		strncpy((char *)cbe_lun->device_id, tmpstr,
585 			MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
586 
587 		/* Tell the user what we used for a device ID */
588 		strncpy((char *)params->device_id, tmpstr,
589 			MIN(sizeof(params->device_id), sizeof(tmpstr)));
590 	} else {
591 		strncpy((char *)cbe_lun->device_id, params->device_id,
592 			MIN(sizeof(cbe_lun->device_id),
593 			    sizeof(params->device_id)));
594 	}
595 
596 	STAILQ_INIT(&be_lun->cont_queue);
597 	mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
598 	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
599 	    be_lun);
600 
601 	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
602 	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
603 	if (be_lun->io_taskqueue == NULL) {
604 		snprintf(req->error_str, sizeof(req->error_str),
605 			 "%s: Unable to create taskqueue", __func__);
606 		goto bailout_error;
607 	}
608 
609 	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
610 					 /*num threads*/1,
611 					 /*priority*/PWAIT,
612 					 /*thread name*/
613 					 "%s taskq", be_lun->lunname);
614 	if (retval != 0)
615 		goto bailout_error;
616 
617 	mtx_lock(&softc->lock);
618 	softc->num_luns++;
619 	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
620 	mtx_unlock(&softc->lock);
621 
622 	retval = ctl_add_lun(&be_lun->cbe_lun);
623 	if (retval != 0) {
624 		mtx_lock(&softc->lock);
625 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
626 			      links);
627 		softc->num_luns--;
628 		mtx_unlock(&softc->lock);
629 		snprintf(req->error_str, sizeof(req->error_str),
630 			 "%s: ctl_add_lun() returned error %d, see dmesg for "
631 			 "details", __func__, retval);
632 		retval = 0;
633 		goto bailout_error;
634 	}
635 
636 	mtx_lock(&softc->lock);
637 
638 	/*
639 	 * Tell the config_status routine that we're waiting so it won't
640 	 * clean up the LUN in the event of an error.
641 	 */
642 	be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
643 
644 	while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
645 		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
646 		if (retval == EINTR)
647 			break;
648 	}
649 	be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
650 
651 	if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
652 		snprintf(req->error_str, sizeof(req->error_str),
653 			 "%s: LUN configuration error, see dmesg for details",
654 			 __func__);
655 		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
656 			      links);
657 		softc->num_luns--;
658 		mtx_unlock(&softc->lock);
659 		goto bailout_error;
660 	} else {
661 		params->req_lun_id = cbe_lun->lun_id;
662 	}
663 	mtx_unlock(&softc->lock);
664 
665 	req->status = CTL_LUN_OK;
666 	return (retval);
667 
668 bailout_error:
669 	req->status = CTL_LUN_ERROR;
670 	if (be_lun != NULL) {
671 		if (be_lun->io_taskqueue != NULL) {
672 			taskqueue_free(be_lun->io_taskqueue);
673 		}
674 		ctl_free_opts(&cbe_lun->options);
675 		mtx_destroy(&be_lun->queue_lock);
676 		free(be_lun, M_RAMDISK);
677 	}
678 	return (retval);
679 }
680 
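/*
 * Modify an existing LUN: update its options and HA role, recompute its
 * size and notify CTL that the capacity may have changed.
 */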
681 static int
682 ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
683 		       struct ctl_lun_req *req)
684 {
685 	struct ctl_be_ramdisk_lun *be_lun;
686 	struct ctl_be_lun *cbe_lun;
687 	struct ctl_lun_modify_params *params;
688 	char *value;
689 	uint32_t blocksize;
690 	int wasprim;
691 
692 	params = &req->reqdata.modify;
693 
694 	mtx_lock(&softc->lock);
695 	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
696 		if (be_lun->cbe_lun.lun_id == params->lun_id)
697 			break;
698 	}
699 	mtx_unlock(&softc->lock);
700 	if (be_lun == NULL) {
701 		snprintf(req->error_str, sizeof(req->error_str),
702 			 "%s: LUN %u is not managed by the ramdisk backend",
703 			 __func__, params->lun_id);
704 		goto bailout_error;
705 	}
706 	cbe_lun = &be_lun->cbe_lun;
707 
708 	if (params->lun_size_bytes != 0)
709 		be_lun->params.lun_size_bytes = params->lun_size_bytes;
710 	ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
711 
712 	wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
713 	value = ctl_get_opt(&cbe_lun->options, "ha_role");
714 	if (value != NULL) {
715 		if (strcmp(value, "primary") == 0)
716 			cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
717 		else
718 			cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
719 	} else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
720 		cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
721 	else
722 		cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
723 	if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
724 		if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
725 			ctl_lun_primary(cbe_lun);
726 		else
727 			ctl_lun_secondary(cbe_lun);
728 	}
729 
730 	blocksize = be_lun->cbe_lun.blocksize;
731 	if (be_lun->params.lun_size_bytes < blocksize) {
732 		snprintf(req->error_str, sizeof(req->error_str),
733 			"%s: LUN size %ju < blocksize %u", __func__,
734 			be_lun->params.lun_size_bytes, blocksize);
735 		goto bailout_error;
736 	}
737 	be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
738 	be_lun->size_bytes = be_lun->size_blocks * blocksize;
739 	be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
740 	ctl_lun_capacity_changed(&be_lun->cbe_lun);
741 
742 	/* Tell the user the exact size we ended up using */
743 	params->lun_size_bytes = be_lun->size_bytes;
744 
745 	req->status = CTL_LUN_OK;
746 	return (0);
747 
748 bailout_error:
749 	req->status = CTL_LUN_ERROR;
750 	return (0);
751 }
752 
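/*
 * Called by CTL once a LUN has been invalidated.  Marks the LUN
 * unconfigured and either wakes a waiting remover or frees the LUN here.
 */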
753 static void
754 ctl_backend_ramdisk_lun_shutdown(void *be_lun)
755 {
756 	struct ctl_be_ramdisk_lun *lun;
757 	struct ctl_be_ramdisk_softc *softc;
758 	int do_free;
759 
760 	lun = (struct ctl_be_ramdisk_lun *)be_lun;
761 	softc = lun->softc;
762 	do_free = 0;
763 
764 	mtx_lock(&softc->lock);
765 	lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
766 	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
767 		wakeup(lun);
768 	} else {
769 		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
770 			      links);
771 		softc->num_luns--;
772 		do_free = 1;
773 	}
774 	mtx_unlock(&softc->lock);
775 
776 	if (do_free != 0)
777 		free(lun, M_RAMDISK);
778 }
779 
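/*
 * Called by CTL with the result of ctl_add_lun().  On success the LUN is
 * enabled; on failure it is cleaned up here unless a creator is waiting
 * to handle the error.
 */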
780 static void
781 ctl_backend_ramdisk_lun_config_status(void *be_lun,
782 				      ctl_lun_config_status status)
783 {
784 	struct ctl_be_ramdisk_lun *lun;
785 	struct ctl_be_ramdisk_softc *softc;
786 
787 	lun = (struct ctl_be_ramdisk_lun *)be_lun;
788 	softc = lun->softc;
789 
790 	if (status == CTL_LUN_CONFIG_OK) {
791 		mtx_lock(&softc->lock);
792 		lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
793 		if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
794 			wakeup(lun);
795 		mtx_unlock(&softc->lock);
796 
797 		/*
798 		 * We successfully added the LUN, attempt to enable it.
799 		 */
800 		if (ctl_enable_lun(&lun->cbe_lun) != 0) {
801 			printf("%s: ctl_enable_lun() failed!\n", __func__);
802 			if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
803 				printf("%s: ctl_invalidate_lun() failed!\n",
804 				       __func__);
805 			}
806 		}
807 
808 		return;
809 	}
810 
811 
812 	mtx_lock(&softc->lock);
813 	lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
814 
815 	/*
816 	 * If we have a user waiting, let him handle the cleanup.  If not,
817 	 * clean things up here.
818 	 */
819 	if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
820 		lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
821 		wakeup(lun);
822 	} else {
823 		STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
824 			      links);
825 		softc->num_luns--;
826 		free(lun, M_RAMDISK);
827 	}
828 	mtx_unlock(&softc->lock);
829 }
830 
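/*
 * Handle configuration (non-data) writes.  Cache syncs, PREVENT ALLOW,
 * WRITE SAME and UNMAP are no-ops for the fake ramdisk; START STOP UNIT
 * updates the LUN's power and media state.
 */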
831 static int
832 ctl_backend_ramdisk_config_write(union ctl_io *io)
833 {
834 	struct ctl_be_lun *cbe_lun;
835 	int retval;
836 
837 	cbe_lun = CTL_BACKEND_LUN(io);
838 	retval = 0;
839 	switch (io->scsiio.cdb[0]) {
840 	case SYNCHRONIZE_CACHE:
841 	case SYNCHRONIZE_CACHE_16:
842 		/*
843 		 * The upper level CTL code will filter out any CDBs with
844 		 * the immediate bit set and return the proper error.  It
845 		 * will also not allow a sync cache command to go to a LUN
846 		 * that is powered down.
847 		 *
848 		 * We don't really need to worry about what LBA range the
849 		 * user asked to be synced out.  When they issue a sync
850 		 * cache command, we'll sync out the whole thing.
851 		 *
852 		 * This is obviously just a stubbed out implementation.
853 		 * The real implementation will be in the RAIDCore/CTL
854 		 * interface, and can only really happen when RAIDCore
855 		 * implements a per-array cache sync.
856 		 */
857 		ctl_set_success(&io->scsiio);
858 		ctl_config_write_done(io);
859 		break;
860 	case START_STOP_UNIT: {
861 		struct scsi_start_stop_unit *cdb;
862 
863 		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
864 		if ((cdb->how & SSS_PC_MASK) != 0) {
865 			ctl_set_success(&io->scsiio);
866 			ctl_config_write_done(io);
867 			break;
868 		}
869 		if (cdb->how & SSS_START) {
870 			if (cdb->how & SSS_LOEJ)
871 				ctl_lun_has_media(cbe_lun);
872 			ctl_start_lun(cbe_lun);
873 		} else {
874 			ctl_stop_lun(cbe_lun);
875 			if (cdb->how & SSS_LOEJ)
876 				ctl_lun_ejected(cbe_lun);
877 		}
878 		ctl_set_success(&io->scsiio);
879 		ctl_config_write_done(io);
880 		break;
881 	}
882 	case PREVENT_ALLOW:
883 	case WRITE_SAME_10:
884 	case WRITE_SAME_16:
885 	case UNMAP:
886 		ctl_set_success(&io->scsiio);
887 		ctl_config_write_done(io);
888 		break;
889 	default:
890 		ctl_set_invalid_opcode(&io->scsiio);
891 		ctl_config_write_done(io);
892 		retval = CTL_RETVAL_COMPLETE;
893 		break;
894 	}
895 
896 	return (retval);
897 }
898 
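/*
 * Handle configuration reads.  Only GET LBA STATUS is recognized, and it
 * simply completes with the default data already prepared by CTL.
 */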
899 static int
900 ctl_backend_ramdisk_config_read(union ctl_io *io)
901 {
902 	int retval = 0;
903 
904 	switch (io->scsiio.cdb[0]) {
905 	case SERVICE_ACTION_IN:
906 		if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
907 			/* We have nothing to tell, leave default data. */
908 			ctl_config_read_done(io);
909 			retval = CTL_RETVAL_COMPLETE;
910 			break;
911 		}
912 		ctl_set_invalid_field(&io->scsiio,
913 				      /*sks_valid*/ 1,
914 				      /*command*/ 1,
915 				      /*field*/ 1,
916 				      /*bit_valid*/ 1,
917 				      /*bit*/ 4);
918 		ctl_config_read_done(io);
919 		retval = CTL_RETVAL_COMPLETE;
920 		break;
921 	default:
922 		ctl_set_invalid_opcode(&io->scsiio);
923 		ctl_config_read_done(io);
924 		retval = CTL_RETVAL_COMPLETE;
925 		break;
926 	}
927 
928 	return (retval);
929 }
930