/*
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
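
/*
 * This file implements /dev/fuse, the character device through which a
 * userspace FUSE daemon talks to the kernel: the daemon reads kernel
 * requests via fuse_device_read(), writes its answers back via
 * fuse_device_write(), and can poll() for pending requests.  Per-open
 * state is kept in a struct fuse_data attached as cdevpriv.
 */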

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include "fuse.h"
#include "fuse_ipc.h"

#define FUSE_DEBUG_MODULE DEVICE
#include "fuse_debug.h"

static struct cdev *fuse_dev;

static d_open_t fuse_device_open;
static d_close_t fuse_device_close;
static d_poll_t fuse_device_poll;
static d_read_t fuse_device_read;
static d_write_t fuse_device_write;

static struct cdevsw fuse_device_cdevsw = {
	.d_open = fuse_device_open,
	.d_close = fuse_device_close,
	.d_name = "fuse",
	.d_poll = fuse_device_poll,
	.d_read = fuse_device_read,
	.d_write = fuse_device_write,
	.d_version = D_VERSION,
};

/****************************
 *
 * >>> Fuse device op defs
 *
 ****************************/

static void
fdata_dtor(void *arg)
{
	struct fuse_data *fdata;

	fdata = arg;
	fdata_trydestroy(fdata);
}

/*
 * Resources are set up on a per-open basis.
 */
static int
fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct fuse_data *fdata;
	int error;

	FS_DEBUG("device %p\n", dev);

	fdata = fdata_alloc(dev, td->td_ucred);
	error = devfs_set_cdevpriv(fdata, fdata_dtor);
	if (error != 0)
		fdata_trydestroy(fdata);
	else
		FS_DEBUG("%s: device opened by thread %d.\n", dev->si_name,
		    td->td_tid);
	return (error);
}

static int
fuse_device_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	struct fuse_data *data;
	struct fuse_ticket *tick;
	int error;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (error);
	if (!data)
		panic("no fuse data upon fuse device close");
	fdata_set_dead(data);

	FUSE_LOCK();
	fuse_lck_mtx_lock(data->aw_mtx);
	/* Wake up poll()ers. */
	selwakeuppri(&data->ks_rsel, PZERO + 1);
	/* Don't let syscall handlers wait in vain. */
	while ((tick = fuse_aw_pop(data))) {
		fuse_lck_mtx_lock(tick->tk_aw_mtx);
		fticket_set_answered(tick);
		tick->tk_aw_errno = ENOTCONN;
		wakeup(tick);
		fuse_lck_mtx_unlock(tick->tk_aw_mtx);
		FUSE_ASSERT_AW_DONE(tick);
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(data->aw_mtx);
	FUSE_UNLOCK();

	FS_DEBUG("%s: device closed by thread %d.\n", dev->si_name, td->td_tid);
	return (0);
}

int
fuse_device_poll(struct cdev *dev, int events, struct thread *td)
{
	struct fuse_data *data;
	int error, revents = 0;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	if (events & (POLLIN | POLLRDNORM)) {
		fuse_lck_mtx_lock(data->ms_mtx);
		if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &data->ks_rsel);
		fuse_lck_mtx_unlock(data->ms_mtx);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
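		/*
		 * Writes (i.e. answers from the daemon) are never refused,
		 * so the device is always ready for writing.
		 */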
		revents |= events & (POLLOUT | POLLWRNORM);
	}
	return (revents);
}

/*
 * fuse_device_read blocks on the queue of messages coming from the VFS.
 * When it is notified that a new one has arrived, it picks the message up
 * and passes it up to the daemon.
 */
int
fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int err;
	struct fuse_data *data;
	struct fuse_ticket *tick;
	void *buf[] = {NULL, NULL, NULL};
	int buflen[3];
	int i;

	FS_DEBUG("fuse device being read on thread %d\n", uio->uio_td->td_tid);

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);

	fuse_lck_mtx_lock(data->ms_mtx);
again:
	if (fdata_get_dead(data)) {
		FS_DEBUG2G("we know early on that the reader should be kicked, so we don't wait for news\n");
		fuse_lck_mtx_unlock(data->ms_mtx);
		return (ENODEV);
	}
	if (!(tick = fuse_ms_pop(data))) {
		/* Check whether we may block. */
		if (ioflag & O_NONBLOCK) {
			/* Non-blocking read: get out of here right away. */
			fuse_lck_mtx_unlock(data->ms_mtx);
			return (EAGAIN);
		} else {
			err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
			if (err != 0) {
				fuse_lck_mtx_unlock(data->ms_mtx);
				return (fdata_get_dead(data) ? ENODEV : err);
			}
			tick = fuse_ms_pop(data);
		}
	}
	if (!tick) {
		/*
		 * We can get here if the fuse daemon suddenly terminates,
		 * e.g. by being hit by a SIGKILL -- and in some other, less
		 * clear cases too (when cv_signal/wakeup_one wakes up the
		 * whole process?).
		 */
		FS_DEBUG("no message on thread #%d\n", uio->uio_td->td_tid);
		goto again;
	}
	fuse_lck_mtx_unlock(data->ms_mtx);

	if (fdata_get_dead(data)) {
		/*
		 * Somebody somewhere -- e.g. the umount routine --
		 * wants this liaison finished off.
		 */
		FS_DEBUG2G("reader is to be sacked\n");
		if (tick) {
			FS_DEBUG2G("weird -- \"kick\" is set though there is a message\n");
			FUSE_ASSERT_MS_DONE(tick);
			fuse_ticket_drop(tick);
		}
		return (ENODEV);	/* This should make the daemon get off
					 * of us. */
	}
	FS_DEBUG("message got on thread #%d\n", uio->uio_td->td_tid);

	KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0,
	    ("null buf pointer with positive size"));

	switch (tick->tk_ms_type) {
	case FT_M_FIOV:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		break;
	case FT_M_BUF:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		buf[1] = tick->tk_ms_bufdata;
		buflen[1] = tick->tk_ms_bufsize;
		break;
	default:
		panic("unknown message type for fuse_ticket %p", tick);
	}

	for (i = 0; buf[i]; i++) {
		/*
		 * Why not mercilessly ban daemons that can't keep up with us?
		 * (There is not much use of a partial read here...)
		 */
		/*
		 * XXX note that in such cases Linux FUSE throws EIO at the
		 * syscall invoker and goes back to the message queue.  The
		 * rationale should be made clear (and possibly that behaviour
		 * adopted).  Keeping the current scheme at least makes the
		 * failure as loud as possible...
		 */
		if (uio->uio_resid < buflen[i]) {
			fdata_set_dead(data);
			FS_DEBUG2G("daemon cannot keep up, kick it off...\n");
			err = ENODEV;
			break;
		}
		err = uiomove(buf[i], buflen[i], uio);
		if (err)
			break;
	}

	FUSE_ASSERT_MS_DONE(tick);
	fuse_ticket_drop(tick);

	return (err);
}

static __inline int
fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
{
	FS_DEBUG("Out header -- len: %i, error: %i, unique: %llu; iovecs: %d\n",
	    ohead->len, ohead->error, (unsigned long long)ohead->unique,
	    uio->uio_iovcnt);

	if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
		FS_DEBUG("Format error: body size differs from size claimed by header\n");
		return (EINVAL);
	}
	if (uio->uio_resid && ohead->error) {
		FS_DEBUG("Format error: non-zero error, but message had a body\n");
		return (EINVAL);
	}
	/* Sanitize the Linuxism of negative errnos. */
	ohead->error = -(ohead->error);

	return (0);
}

/*
 * fuse_device_write first reads the header sent by the daemon.
 * If that is OK, it looks up the ticket/callback node by the unique id seen
 * in the header.  If the callback node contains a handler function, the uio
 * is passed over to that handler.
 */
static int
fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct fuse_out_header ohead;
	int err = 0;
	struct fuse_data *data;
	struct fuse_ticket *tick, *x_tick;
	int found = 0;

	FS_DEBUG("resid: %zd, iovcnt: %d, thread: %d\n",
	    uio->uio_resid, uio->uio_iovcnt, uio->uio_td->td_tid);

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);

	if (uio->uio_resid < sizeof(struct fuse_out_header)) {
		FS_DEBUG("got less than a header!\n");
		fdata_set_dead(data);
		return (EINVAL);
	}
	if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
		return (err);

	/*
	 * We check the header information (which is redundant) and compare
	 * it with what we see.  If we see an inconsistency, we discard the
	 * whole answer and proceed as if it had never existed.  In
	 * particular, no pretender will be woken up, regardless of the
	 * "unique" value in the header.
	 */
	if ((err = fuse_ohead_audit(&ohead, uio))) {
		fdata_set_dead(data);
		return (err);
	}
	/* Pass stuff over to the callback if there is one installed. */

	/* Look for the ticket with the unique id of the header. */
	fuse_lck_mtx_lock(data->aw_mtx);
	TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
	    x_tick) {
		FS_DEBUG("bumped into callback #%llu\n",
		    (unsigned long long)tick->tk_unique);
		if (tick->tk_unique == ohead.unique) {
			found = 1;
			fuse_aw_remove(tick);
			break;
		}
	}
	fuse_lck_mtx_unlock(data->aw_mtx);

	if (found) {
		if (tick->tk_aw_handler) {
			/*
			 * We found a callback with a proper handler.  In this
			 * case the out header will be owned by the callback,
			 * so freeing it is left to the callback.  (In
			 * practice that just happens via fuse_ticket_drop(),
			 * so no manual mucking around...)
			 */
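			/*
			 * Save the out header in the ticket; the handler
			 * consumes the rest of the answer from the uio.
			 */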
			FS_DEBUG("pass ticket to a callback\n");
			memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
			err = tick->tk_aw_handler(tick, uio);
		} else {
			/* The pretender doesn't want to do anything with the answer. */
			FS_DEBUG("stuff devalidated, so we drop it\n");
		}

		/*
		 * As aw_mtx was not held during the callback execution, the
		 * ticket may have been inserted again.  However, this is safe
		 * because fuse_ticket_drop() will deal with the refcount
		 * anyway.
		 */
		fuse_ticket_drop(tick);
	} else {
		/* No callback at all! */
		FS_DEBUG("no handler for this response\n");
		err = EINVAL;
	}

	return (err);
}

int
fuse_device_init(void)
{

	fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, "fuse");
	if (fuse_dev == NULL)
		return (ENOMEM);
	return (0);
}

void
fuse_device_destroy(void)
{

	MPASS(fuse_dev != NULL);
	destroy_dev(fuse_dev);
}