/* /freebsd/sys/cam/ctl/ctl_frontend_ioctl.c (revision ccfd87fe2ac0e2e6aeb1911a7d7cce6712a8564f) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/nv.h>
#include <sys/dnv.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

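/*
 * States for the handshake between cfi_submit_wait(), which sleeps in the
 * caller's ioctl context, and the cfi_datamove()/cfi_done() callbacks that
 * CTL invokes as the I/O progresses.
 */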
typedef enum {
	CTL_IOCTL_INPROG,
	CTL_IOCTL_DATAMOVE,
	CTL_IOCTL_DONE
} ctl_fe_ioctl_state;

struct ctl_fe_ioctl_params {
	struct cv		sem;
	struct mtx		ioctl_mtx;
	ctl_fe_ioctl_state	state;
};

struct cfi_port {
	TAILQ_ENTRY(cfi_port)	link;
	u_int			cur_tag_num;
	struct cdev *		dev;
	struct ctl_port		port;
};

struct cfi_softc {
	TAILQ_HEAD(, cfi_port)	ports;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static int cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);
static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td);
static void cfi_ioctl_port_create(struct ctl_req *req);
static void cfi_ioctl_port_remove(struct ctl_req *req);

static struct cdevsw cfi_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = 0,
	.d_ioctl = ctl_ioctl_io
};

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.ioctl = cfi_ioctl,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

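/*
 * Frontend init: set up the port list and register the default "ioctl"
 * port (physical port 0).  Note that no dedicated character device is
 * created here; only ports created later via cfi_ioctl_port_create() get
 * their own /dev/cam/ctl<pp>.<vp> node.
 */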
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	int error = 0;

	memset(isoftc, 0, sizeof(*isoftc));
	TAILQ_INIT(&isoftc->ports);

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = 0;
	port->targ_port = -1;

	if ((error = ctl_port_register(port)) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		free(cfi, M_CTL);
		return (error);
	}

	ctl_port_online(port);
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
	return (0);
}

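/*
 * Frontend shutdown: take every remaining ioctl port offline, deregister
 * it and free its state.  Fails (and stops) if any port refuses to
 * deregister.
 */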
static int
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi, *temp;
	struct ctl_port *port;
	int error;

	TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) {
		port = &cfi->port;
		ctl_port_offline(port);
		error = ctl_port_deregister(port);
		if (error != 0) {
			printf("%s: ctl_port_deregister() failed\n",
			   __func__);
			return (error);
		}

		TAILQ_REMOVE(&isoftc->ports, cfi, link);
		if (cfi->dev != NULL)
			destroy_dev(cfi->dev);
		free(cfi, M_CTL);
	}

	return (0);
}

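/*
 * Handle CTL_REQ_CREATE for the "ioctl" frontend: take the physical and
 * virtual port numbers from the "pp"/"vp" request arguments (or pick the
 * next free physical port), register a new CTL port and expose it as
 * /dev/cam/ctl<pp>.<vp>.  The new port's target port number is returned
 * in the "port_id" result.
 *
 * For illustration only (the exact ctladm(8) option spelling is not
 * verified here), such a port would typically be created from userland
 * with something like:
 *
 *	ctladm port -c -d "ioctl" -O pp=1 -O vp=0
 */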
static void
cfi_ioctl_port_create(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi;
	struct ctl_port *port;
	struct make_dev_args args;
	const char *val;
	int retval;
	int pp = -1, vp = 0;

	val = dnvlist_get_string(req->args_nvl, "pp", NULL);
	if (val != NULL)
		pp = strtol(val, NULL, 10);

	val = dnvlist_get_string(req->args_nvl, "vp", NULL);
	if (val != NULL)
		vp = strtol(val, NULL, 10);

	if (pp != -1) {
		/* Check for duplicates */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			if (pp == cfi->port.physical_port &&
			    vp == cfi->port.virtual_port) {
				req->status = CTL_LUN_ERROR;
				snprintf(req->error_str, sizeof(req->error_str),
				    "port %d already exists", pp);

				return;
			}
		}
	} else {
		/* Find free port number */
		TAILQ_FOREACH(cfi, &isoftc->ports, link) {
			pp = MAX(pp, cfi->port.physical_port);
		}

		pp++;
	}

	cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
	port = &cfi->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->physical_port = pp;
	port->virtual_port = vp;
	port->targ_port = -1;

	retval = ctl_port_register(port);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "ctl_port_register() failed with error %d", retval);
		free(cfi, M_CTL);
		return;
	}

	req->result_nvl = nvlist_create(0);
	nvlist_add_number(req->result_nvl, "port_id", port->targ_port);
	ctl_port_online(port);

	make_dev_args_init(&args);
	args.mda_devsw = &cfi_cdevsw;
	args.mda_uid = UID_ROOT;
	args.mda_gid = GID_OPERATOR;
	args.mda_mode = 0600;
	args.mda_si_drv1 = NULL;
	args.mda_si_drv2 = cfi;

	retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp);
	if (retval != 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "make_dev_s() failed with error %d", retval);
		ctl_port_offline(port);
		ctl_port_deregister(port);
		free(cfi, M_CTL);
		return;
	}

	req->status = CTL_LUN_OK;
	TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
}

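/*
 * Handle CTL_REQ_REMOVE for the "ioctl" frontend: look up the port by the
 * "port_id" argument, refuse to remove the default port (pp 0 / vp 0),
 * then take the port offline, deregister it and destroy its device node.
 */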
static void
cfi_ioctl_port_remove(struct ctl_req *req)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct cfi_port *cfi = NULL;
	const char *val;
	int port_id = -1;

	val = dnvlist_get_string(req->args_nvl, "port_id", NULL);
	if (val != NULL)
		port_id = strtol(val, NULL, 10);

	if (port_id == -1) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "port_id not provided");
		return;
	}

	TAILQ_FOREACH(cfi, &isoftc->ports, link) {
		if (cfi->port.targ_port == port_id)
			break;
	}

	if (cfi == NULL) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot find port %d", port_id);

		return;
	}

	if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) {
		req->status = CTL_LUN_ERROR;
		snprintf(req->error_str, sizeof(req->error_str),
		    "cannot destroy default ioctl port");

		return;
	}

	ctl_port_offline(&cfi->port);
	ctl_port_deregister(&cfi->port);
	TAILQ_REMOVE(&isoftc->ports, cfi, link);
	destroy_dev(cfi->dev);
	free(cfi, M_CTL);
	req->status = CTL_LUN_OK;
}

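/*
 * Frontend ioctl handler: dispatches CTL_PORT_REQ port create/remove
 * requests addressed to this frontend; anything else returns ENOTTY.
 */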
static int
cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct ctl_req *req;

	if (cmd == CTL_PORT_REQ) {
		req = (struct ctl_req *)addr;
		switch (req->reqtype) {
		case CTL_REQ_CREATE:
			cfi_ioctl_port_create(req);
			break;
		case CTL_REQ_REMOVE:
			cfi_ioctl_port_remove(req);
			break;
		default:
			req->status = CTL_LUN_ERROR;
			snprintf(req->error_str, sizeof(req->error_str),
			    "Unsupported request type %d", req->reqtype);
		}
		return (0);
	}

	return (ENOTTY);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
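 *
 * Copies data between the caller-supplied ("ext") buffer or S/G list and
 * the kernel ("kern") S/G list attached to this I/O, in whichever
 * direction CTL_FLAG_DATA_IN indicates.  ext_data_filled records how much
 * of the user buffer has already been consumed, so repeated calls resume
 * where the previous move stopped.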
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ext_sglist_malloced = 0;
		ctsio->ext_data_filled += ctsio->kern_data_len;
		ctsio->kern_data_resid = 0;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * an S/G entry and just make it a single-entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		ext_sg_start = ext_sg_entries;
		ext_offset = 0;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist_malloced = 0;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		ctsio->ext_data_filled += len_to_copy;
		ctsio->kern_data_resid -= len_to_copy;

		ext_watermark += len_to_copy;
		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		kern_watermark += len_to_copy;
		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

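/*
 * fe_datamove callback: record that a data move is required and wake up
 * the thread sleeping in cfi_submit_wait(), which performs the actual
 * copy in the caller's (user ioctl) context.
 */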
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

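/*
 * fe_done callback: mark the I/O as finished and wake up the thread
 * sleeping in cfi_submit_wait().
 */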
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

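/*
 * Submit an I/O to CTL and sleep until it completes, servicing any
 * CTL_IOCTL_DATAMOVE requests in between.  The params structure lives on
 * this thread's stack and is reached by the callbacks through the I/O's
 * CTL_PRIV_FRONTEND pointer.
 */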
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	bzero(&params, sizeof(params));
	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE) {
		mtx_destroy(&params.ioctl_mtx);
		cv_destroy(&params.sem);
		return (retval);
	}

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			ctl_datamove_done(io, false);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

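/*
 * Entry point for the CTL_IO ioctl.  On the per-port /dev/cam/ctl<pp>.<vp>
 * nodes si_drv2 selects the port; when si_drv2 is NULL the default port is
 * used.  The user's ctl_io is copied over a pool-allocated ctl_io, run to
 * completion via cfi_submit_wait(), and copied back so the caller can
 * inspect the status.
 *
 * A rough userland sketch, for illustration only; it assumes a port
 * created as pp=1/vp=0, and the CDB-building helper comes from
 * cam/ctl/ctl_util.h (check its exact signature there):
 *
 *	union ctl_io io;
 *	int fd = open("/dev/cam/ctl1.0", O_RDWR);
 *
 *	memset(&io, 0, sizeof(io));
 *	ctl_scsi_tur(&io, CTL_TAG_SIMPLE);	// build a TEST UNIT READY
 *	io.io_hdr.nexus.targ_lun = 0;
 *	if (ioctl(fd, CTL_IO, &io) == -1)
 *		err(1, "CTL_IO");
 */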
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	struct cfi_port *cfi;
	union ctl_io *io;
	void *pool_tmp, *sc_tmp;
	int retval = 0;

	if (cmd != CTL_IO)
		return (ENOTTY);

	cfi = dev->si_drv2 == NULL
	    ? TAILQ_FIRST(&cfi_softc.ports)
	    : dev->si_drv2;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi->port.ctl_pool_ref);

	/*
	 * Save the pool and softc references so they don't get clobbered
	 * when the user's ctl_io is copied over ours.
	 */
	pool_tmp = io->io_hdr.pool;
	sc_tmp = CTL_SOFTC(io);
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;
	CTL_SOFTC(io) = sc_tmp;
	TAILQ_INIT(&io->io_hdr.blocked_queue);

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator and LUN IDs; the target port is
	 * always the port this device node belongs to.
	 */
	io->io_hdr.nexus.targ_port = cfi->port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.flags & CTL_FLAG_USER_TAG) == 0 &&
	    io->io_hdr.io_type == CTL_IO_SCSI &&
	    io->scsiio.tag_type != CTL_TAG_UNTAGGED)
		io->scsiio.tag_num = atomic_fetchadd_int(&cfi->cur_tag_num, 1);

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));

	ctl_free_io(io);
	return (retval);
}