xref: /freebsd/sys/cam/ctl/ctl_frontend_ioctl.c (revision 9ce06829f29232e312130530c304d287b39b0059)
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

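/*
 * Per-frontend state: the next tag number to hand out for tagged SCSI
 * commands, and the single CTL port registered by this frontend.
 */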
struct cfi_softc {
	uint32_t		cur_tag_num;
	struct ctl_port		port;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static void cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

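/*
 * Frontend init method: set up and register the single "ioctl" port and
 * bring it online.
 */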
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	memset(isoftc, 0, sizeof(*isoftc));

	port = &isoftc->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->max_targets = 1;
	port->max_target_id = 0;
	port->targ_port = -1;
	port->max_initiators = 1;

	if (ctl_port_register(port) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		return (ENOMEM);
	}
	ctl_port_online(port);
	return (0);
}

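/*
 * Frontend shutdown method: take the port offline and deregister it.
 */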
static void
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	port = &isoftc->port;
	ctl_port_offline(port);
	if (ctl_port_deregister(&isoftc->port) != 0)
		printf("%s: ctl_port_deregister() failed\n", __func__);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

	/* XXX KDM set residual?? */
bailout:
	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

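/*
 * fe_datamove callback.  The I/O was submitted by cfi_submit_wait(), which
 * is sleeping on params->sem; flag the state change and wake it so the data
 * transfer is performed in the submitting thread's context.
 */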
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

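/*
 * fe_done callback: mark the I/O complete and wake the thread sleeping in
 * cfi_submit_wait().
 */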
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

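/*
 * Queue an I/O to CTL and sleep until it completes, servicing any datamove
 * requests (via ctl_ioctl_do_datamove()) along the way.
 */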
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	retval = 0;

	bzero(&params, sizeof(params));

	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen; clean up and bail out if it does. */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) {
		mtx_destroy(&params.ioctl_mtx);
		cv_destroy(&params.sem);
		return (retval);
	}

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * Change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

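/*
 * ioctl entry point: run a caller-supplied ctl_io (the ioctl argument at
 * 'addr') through the ioctl port, wait for it to complete and copy the
 * result back out.
 */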
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	union ctl_io *io;
	void *pool_tmp;
	int retval = 0;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi_softc.port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi_softc.port.ctl_pool_ref);

	/*
	 * Save the pool reference so it doesn't get clobbered when we
	 * copy in the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator and LUN IDs; the target port is
	 * always this frontend's port.
	 */
	io->io_hdr.nexus.targ_port = cfi_softc.port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.io_type == CTL_IO_SCSI) &&
	    (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
		io->scsiio.tag_num = cfi_softc.cur_tag_num++;

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));
	ctl_free_io(io);
	return (retval);
}