/* xref: /freebsd/sys/cam/ctl/ctl_frontend_ioctl.c (revision 0b3105a37d7adcadcb720112fed4dc4e8040be99) */
/*-
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/sysctl.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_error.h>

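/*
 * Handshake between the thread sleeping in cfi_submit_wait() and the
 * cfi_datamove()/cfi_done() callbacks.  The state below lives in a
 * ctl_fe_ioctl_params structure allocated on the waiting thread's stack.
 */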
typedef enum {
	CTL_IOCTL_INPROG,
	CTL_IOCTL_DATAMOVE,
	CTL_IOCTL_DONE
} ctl_fe_ioctl_state;

struct ctl_fe_ioctl_params {
	struct cv		sem;
	struct mtx		ioctl_mtx;
	ctl_fe_ioctl_state	state;
};

struct cfi_softc {
	uint32_t		cur_tag_num;
	struct ctl_port		port;
};

static struct cfi_softc cfi_softc;

static int cfi_init(void);
static void cfi_shutdown(void);
static void cfi_datamove(union ctl_io *io);
static void cfi_done(union ctl_io *io);

static struct ctl_frontend cfi_frontend =
{
	.name = "ioctl",
	.init = cfi_init,
	.shutdown = cfi_shutdown,
};
CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);

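/*
 * Frontend init callback: register the single "ioctl" port with CTL and
 * bring it online.
 */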
static int
cfi_init(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	memset(isoftc, 0, sizeof(*isoftc));

	port = &isoftc->port;
	port->frontend = &cfi_frontend;
	port->port_type = CTL_PORT_IOCTL;
	port->num_requested_ctl_io = 100;
	port->port_name = "ioctl";
	port->fe_datamove = cfi_datamove;
	port->fe_done = cfi_done;
	port->max_targets = 1;
	port->max_target_id = 0;
	port->targ_port = -1;
	port->max_initiators = 1;

	if (ctl_port_register(port) != 0) {
		printf("%s: ioctl port registration failed\n", __func__);
		return (0);
	}
	ctl_port_online(port);
	return (0);
}

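/*
 * Frontend shutdown callback: take the ioctl port offline and deregister it.
 */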
static void
cfi_shutdown(void)
{
	struct cfi_softc *isoftc = &cfi_softc;
	struct ctl_port *port;

	port = &isoftc->port;
	ctl_port_offline(port);
	if (ctl_port_deregister(&isoftc->port) != 0)
		printf("%s: ctl_port_deregister() failed\n", __func__);
}

/*
 * Data movement routine for the CTL ioctl frontend port.
 */
static int
ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
{
	struct ctl_sg_entry *ext_sglist, *kern_sglist;
	struct ctl_sg_entry ext_entry, kern_entry;
	int ext_sglen, ext_sg_entries, kern_sg_entries;
	int ext_sg_start, ext_offset;
	int len_to_copy, len_copied;
	int kern_watermark, ext_watermark;
	int ext_sglist_malloced;
	int i, j;

	ext_sglist_malloced = 0;
	ext_sg_start = 0;
	ext_offset = 0;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));

	/*
	 * If this flag is set, fake the data transfer.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
		ctsio->ext_data_filled = ctsio->ext_data_len;
		goto bailout;
	}

	/*
	 * To simplify things here, if we have a single buffer, stick it in
	 * a S/G entry and just make it a single entry S/G list.
	 */
	if (ctsio->ext_sg_entries > 0) {
		int len_seen;

		ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);

		ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
							   M_WAITOK);
		ext_sglist_malloced = 1;
		if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
			ctsio->io_hdr.port_status = 31343;
			goto bailout;
		}
		ext_sg_entries = ctsio->ext_sg_entries;
		len_seen = 0;
		for (i = 0; i < ext_sg_entries; i++) {
			if ((len_seen + ext_sglist[i].len) >=
			     ctsio->ext_data_filled) {
				ext_sg_start = i;
				ext_offset = ctsio->ext_data_filled - len_seen;
				break;
			}
			len_seen += ext_sglist[i].len;
		}
	} else {
		ext_sglist = &ext_entry;
		ext_sglist->addr = ctsio->ext_data_ptr;
		ext_sglist->len = ctsio->ext_data_len;
		ext_sg_entries = 1;
		ext_sg_start = 0;
		ext_offset = ctsio->ext_data_filled;
	}

	if (ctsio->kern_sg_entries > 0) {
		kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
		kern_sg_entries = ctsio->kern_sg_entries;
	} else {
		kern_sglist = &kern_entry;
		kern_sglist->addr = ctsio->kern_data_ptr;
		kern_sglist->len = ctsio->kern_data_len;
		kern_sg_entries = 1;
	}

	kern_watermark = 0;
	ext_watermark = ext_offset;
	len_copied = 0;
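	/*
	 * Walk the external (user) and kernel S/G lists in lockstep.  The
	 * watermarks track how many bytes of the current entry on each side
	 * have already been consumed.
	 */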
	for (i = ext_sg_start, j = 0;
	     i < ext_sg_entries && j < kern_sg_entries;) {
		uint8_t *ext_ptr, *kern_ptr;

		len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
				  kern_sglist[j].len - kern_watermark);

		ext_ptr = (uint8_t *)ext_sglist[i].addr;
		ext_ptr = ext_ptr + ext_watermark;
		if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
			/*
			 * XXX KDM fix this!
			 */
			panic("need to implement bus address support");
#if 0
			kern_ptr = bus_to_virt(kern_sglist[j].addr);
#endif
		} else
			kern_ptr = (uint8_t *)kern_sglist[j].addr;
		kern_ptr = kern_ptr + kern_watermark;

		kern_watermark += len_to_copy;
		ext_watermark += len_to_copy;

		if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		     CTL_FLAG_DATA_IN) {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes to user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", kern_ptr, ext_ptr));
			if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31344;
				goto bailout;
			}
		} else {
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
					 "bytes from user\n", len_to_copy));
			CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
					 "to %p\n", ext_ptr, kern_ptr));
			if (copyin(ext_ptr, kern_ptr, len_to_copy) != 0) {
				ctsio->io_hdr.port_status = 31345;
				goto bailout;
			}
		}

		len_copied += len_to_copy;

		if (ext_sglist[i].len == ext_watermark) {
			i++;
			ext_watermark = 0;
		}

		if (kern_sglist[j].len == kern_watermark) {
			j++;
			kern_watermark = 0;
		}
	}

	ctsio->ext_data_filled += len_copied;

	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
			 "kern_sg_entries: %d\n", ext_sg_entries,
			 kern_sg_entries));
	CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
			 "kern_data_len = %d\n", ctsio->ext_data_len,
			 ctsio->kern_data_len));

	/* XXX KDM set residual?? */
bailout:

	if (ext_sglist_malloced != 0)
		free(ext_sglist, M_CTL);

	return (CTL_RETVAL_COMPLETE);
}

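/*
 * fe_datamove callback: the CTL core has kernel buffers ready for this I/O.
 * Flag the request and wake the thread sleeping in cfi_submit_wait(), which
 * does the actual user<->kernel copy in its own context.
 */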
static void
cfi_datamove(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DATAMOVE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

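/*
 * fe_done callback: the I/O has completed; mark it done and wake the waiter.
 */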
static void
cfi_done(union ctl_io *io)
{
	struct ctl_fe_ioctl_params *params;

	params = (struct ctl_fe_ioctl_params *)
		io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;

	mtx_lock(&params->ioctl_mtx);
	params->state = CTL_IOCTL_DONE;
	cv_broadcast(&params->sem);
	mtx_unlock(&params->ioctl_mtx);
}

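/*
 * Queue an I/O to the CTL core and sleep on a stack-allocated condition
 * variable, servicing CTL_IOCTL_DATAMOVE requests until the I/O reaches
 * CTL_IOCTL_DONE.
 */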
static int
cfi_submit_wait(union ctl_io *io)
{
	struct ctl_fe_ioctl_params params;
	ctl_fe_ioctl_state last_state;
	int done, retval;

	bzero(&params, sizeof(params));
	mtx_init(&params.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
	cv_init(&params.sem, "ctlioccv");
	params.state = CTL_IOCTL_INPROG;
	last_state = params.state;

	io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = &params;

	CTL_DEBUG_PRINT(("cfi_submit_wait\n"));

	/* This shouldn't happen */
	if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) {
		mtx_destroy(&params.ioctl_mtx);
		cv_destroy(&params.sem);
		return (retval);
	}

	done = 0;

	do {
		mtx_lock(&params.ioctl_mtx);
		/*
		 * Check the state here, and don't sleep if the state has
		 * already changed (i.e. wakeup has already occurred, but we
		 * weren't waiting yet).
		 */
		if (params.state == last_state) {
			/* XXX KDM cv_wait_sig instead? */
			cv_wait(&params.sem, &params.ioctl_mtx);
		}
		last_state = params.state;

		switch (params.state) {
		case CTL_IOCTL_INPROG:
			/* Why did we wake up? */
			/* XXX KDM error here? */
			mtx_unlock(&params.ioctl_mtx);
			break;
		case CTL_IOCTL_DATAMOVE:
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));

			/*
			 * change last_state back to INPROG to avoid
			 * deadlock on subsequent data moves.
			 */
			params.state = last_state = CTL_IOCTL_INPROG;

			mtx_unlock(&params.ioctl_mtx);
			ctl_ioctl_do_datamove(&io->scsiio);
			/*
			 * Note that in some cases, most notably writes,
			 * this will queue the I/O and call us back later.
			 * In other cases, generally reads, this routine
			 * will immediately call back and wake us up,
			 * probably using our own context.
			 */
			io->scsiio.be_move_done(io);
			break;
		case CTL_IOCTL_DONE:
			mtx_unlock(&params.ioctl_mtx);
			CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
			done = 1;
			break;
		default:
			mtx_unlock(&params.ioctl_mtx);
			/* XXX KDM error here? */
			break;
		}
	} while (done == 0);

	mtx_destroy(&params.ioctl_mtx);
	cv_destroy(&params.sem);

	return (CTL_RETVAL_COMPLETE);
}

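/*
 * Execute a ctl_io handed in from userland through the CTL ioctl interface:
 * copy it into a freshly allocated kernel ctl_io, run it synchronously via
 * cfi_submit_wait(), and copy the result back on success.
 */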
int
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	union ctl_io *io;
	void *pool_tmp;
	int retval = 0;

	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 * to this FETD.
	 */
	if ((cfi_softc.port.status & CTL_PORT_STATUS_ONLINE) == 0)
		return (EPERM);

	io = ctl_alloc_io(cfi_softc.port.ctl_pool_ref);

	/*
	 * Need to save the pool reference so it doesn't get
	 * spammed by the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;

	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;

	/*
	 * The user sets the initiator ID, target and LUN IDs.
	 */
	io->io_hdr.nexus.targ_port = cfi_softc.port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	if ((io->io_hdr.io_type == CTL_IO_SCSI) &&
	    (io->scsiio.tag_type != CTL_TAG_UNTAGGED))
		io->scsiio.tag_num = cfi_softc.cur_tag_num++;

	retval = cfi_submit_wait(io);
	if (retval == 0)
		memcpy((void *)addr, io, sizeof(*io));
	ctl_free_io(io);
	return (retval);
}