/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * Portions of this software were developed by BFF Storage Systems, LLC under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/sdt.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include "fuse.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"

SDT_PROVIDER_DECLARE(fusefs);
/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , device, trace, "int", "char*");

static struct cdev *fuse_dev;

static d_kqfilter_t fuse_device_filter;
static d_open_t fuse_device_open;
static d_poll_t fuse_device_poll;
static d_read_t fuse_device_read;
static d_write_t fuse_device_write;

static struct cdevsw fuse_device_cdevsw = {
	.d_kqfilter = fuse_device_filter,
	.d_open = fuse_device_open,
	.d_name = "fuse",
	.d_poll = fuse_device_poll,
	.d_read = fuse_device_read,
	.d_write = fuse_device_write,
	.d_version = D_VERSION,
};

static int fuse_device_filt_read(struct knote *kn, long hint);
static void fuse_device_filt_detach(struct knote *kn);

struct filterops fuse_device_rfiltops = {
	.f_isfd = 1,
	.f_detach = fuse_device_filt_detach,
	.f_event = fuse_device_filt_read,
};

/****************************
 *
 * >>> Fuse device op defs
 *
 ****************************/

static void
fdata_dtor(void *arg)
{
	struct fuse_data *fdata;
	struct fuse_ticket *tick;

	fdata = arg;
	if (fdata == NULL)
		return;

	fdata_set_dead(fdata);

	FUSE_LOCK();
	fuse_lck_mtx_lock(fdata->aw_mtx);
	/* Wake up poll()ers */
	selwakeuppri(&fdata->ks_rsel, PZERO + 1);
	/* Don't let syscall handlers wait in vain */
	while ((tick = fuse_aw_pop(fdata))) {
		fuse_lck_mtx_lock(tick->tk_aw_mtx);
		fticket_set_answered(tick);
		tick->tk_aw_errno = ENOTCONN;
		wakeup(tick);
		fuse_lck_mtx_unlock(tick->tk_aw_mtx);
		FUSE_ASSERT_AW_DONE(tick);
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->aw_mtx);

	/* Clean up unsent operations */
	fuse_lck_mtx_lock(fdata->ms_mtx);
	while ((tick = fuse_ms_pop(fdata))) {
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->ms_mtx);
	FUSE_UNLOCK();

	fdata_trydestroy(fdata);
}

static int
fuse_device_filter(struct cdev *dev, struct knote *kn)
{
	struct fuse_data *data;
	int error;

	error = devfs_get_cdevpriv((void **)&data);

	/* EVFILT_WRITE is not supported; the device is always ready to write */
	if (error == 0 && kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &fuse_device_rfiltops;
		kn->kn_hook = data;
		knlist_add(&data->ks_rsel.si_note, kn, 0);
		error = 0;
	} else if (error == 0) {
		error = EINVAL;
		kn->kn_data = error;
	}

	return (error);
}

static void
fuse_device_filt_detach(struct knote *kn)
{
	struct fuse_data *data;

	data = (struct fuse_data *)kn->kn_hook;
	MPASS(data != NULL);
	knlist_remove(&data->ks_rsel.si_note, kn, 0);
	kn->kn_hook = NULL;
}
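
/*
 * kevent(2) read filter.  A descriptive note, not upstream documentation:
 * the device is reported readable when at least one unsent request is
 * queued (kn_data is set to the queue depth) or when the connection is
 * dead, in which case EV_EOF is raised with ENODEV in kn_fflags.  It runs
 * with ms_mtx held, as the mtx_assert() below documents, via the knlist
 * registered in fuse_device_filter().
 */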
static int
fuse_device_filt_read(struct knote *kn, long hint)
{
	struct fuse_data *data;
	int ready;

	data = (struct fuse_data *)kn->kn_hook;
	MPASS(data != NULL);

	mtx_assert(&data->ms_mtx, MA_OWNED);
	if (fdata_get_dead(data)) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = ENODEV;
		kn->kn_data = 1;
		ready = 1;
	} else if (STAILQ_FIRST(&data->ms_head)) {
		MPASS(data->ms_count >= 1);
		kn->kn_data = data->ms_count;
		ready = 1;
	} else {
		ready = 0;
	}

	return (ready);
}

/*
 * Resources are set up on a per-open basis
 */
static int
fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct fuse_data *fdata;
	int error;

	SDT_PROBE2(fusefs, , device, trace, 1, "device open");

	fdata = fdata_alloc(dev, td->td_ucred);
	error = devfs_set_cdevpriv(fdata, fdata_dtor);
	if (error != 0)
		fdata_trydestroy(fdata);
	else
		SDT_PROBE2(fusefs, , device, trace, 1, "device open success");
	return (error);
}

int
fuse_device_poll(struct cdev *dev, int events, struct thread *td)
{
	struct fuse_data *data;
	int error, revents = 0;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	if (events & (POLLIN | POLLRDNORM)) {
		fuse_lck_mtx_lock(data->ms_mtx);
		if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &data->ks_rsel);
		fuse_lck_mtx_unlock(data->ms_mtx);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		revents |= events & (POLLOUT | POLLWRNORM);
	}
	return (revents);
}
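
/*
 * Illustrative only, not part of the driver: a user space daemon typically
 * opens this device, hands the descriptor over for mounting (usually via
 * mount_fusefs(8)), and then loops over read(2)/write(2), using poll(2) or
 * kevent(2) to wait for requests.  Error handling and the real dispatch
 * logic are omitted in the sketch below; the buffer simply has to be large
 * enough for the largest possible request, since a short read causes
 * fuse_device_read() below to kill the connection and return ENODEV.
 *
 *	int fd = open("/dev/fuse", O_RDWR);
 *	// ... mount step associates fd with a mountpoint ...
 *	char buf[1 << 17];			// size is the daemon's choice
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));	// one whole request
 *		// ... decode header + body, perform the operation,
 *		//     build a reply (construction omitted) ...
 *		write(fd, reply, replylen);		// one complete reply
 *	}
 */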

/*
 * fuse_device_read blocks on the queue of VFS messages.  When notified that
 * a new one has arrived, it dequeues that message and passes it up to the
 * daemon.
 */
int
fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int err;
	struct fuse_data *data;
	struct fuse_ticket *tick;
	void *buf[] = {NULL, NULL, NULL};
	int buflen[3];
	int i;

	SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read");

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);

	fuse_lck_mtx_lock(data->ms_mtx);
again:
	if (fdata_get_dead(data)) {
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "we know early on that reader should be kicked so we "
		    "don't wait for news");
		fuse_lck_mtx_unlock(data->ms_mtx);
		return (ENODEV);
	}
	if (!(tick = fuse_ms_pop(data))) {
		/* Check whether we may block */
		if (ioflag & O_NONBLOCK) {
			/* Non-blocking read: return immediately */
			fuse_lck_mtx_unlock(data->ms_mtx);
			return (EAGAIN);
		} else {
			err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
			if (err != 0) {
				fuse_lck_mtx_unlock(data->ms_mtx);
				return (fdata_get_dead(data) ? ENODEV : err);
			}
			tick = fuse_ms_pop(data);
		}
	}
	if (!tick) {
		/*
		 * We can get here if the fuse daemon suddenly terminates,
		 * e.g. when it is hit by a SIGKILL -- and possibly in some
		 * other, less clear cases as well (e.g. when
		 * cv_signal/wakeup_one wakes more than one waiter).
		 */
		SDT_PROBE2(fusefs, , device, trace, 1, "no message on thread");
		goto again;
	}
	fuse_lck_mtx_unlock(data->ms_mtx);

	if (fdata_get_dead(data)) {
		/*
		 * Somebody somewhere -- e.g. the umount routine --
		 * wants this connection finished off.
		 */
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "reader is to be sacked");
		if (tick) {
			SDT_PROBE2(fusefs, , device, trace, 2, "weird -- "
			    "\"kick\" is set tho there is message");
			FUSE_ASSERT_MS_DONE(tick);
			fuse_ticket_drop(tick);
		}
		return (ENODEV);	/* This should make the daemon get
					 * off of us */
	}
	SDT_PROBE2(fusefs, , device, trace, 1,
	    "fuse device read message successfully");

	KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0,
	    ("null buf pointer with positive size"));

	switch (tick->tk_ms_type) {
	case FT_M_FIOV:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		break;
	case FT_M_BUF:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		buf[1] = tick->tk_ms_bufdata;
		buflen[1] = tick->tk_ms_bufsize;
		break;
	default:
		panic("unknown message type for fuse_ticket %p", tick);
	}

	for (i = 0; buf[i]; i++) {
		/*
		 * Mercilessly cut off daemons that can't keep up with us;
		 * there is not much use for a partial read here.
		 */
		/*
		 * XXX Note that in such cases Linux FUSE returns EIO to the
		 * syscall invoker and goes back to the message queue.  The
		 * rationale for that should be clarified (and possibly that
		 * behaviour adopted).  Keeping the current scheme at least
		 * makes the failure as loud as possible...
		 */
		if (uio->uio_resid < buflen[i]) {
			fdata_set_dead(data);
			SDT_PROBE2(fusefs, , device, trace, 2,
			    "daemon is stupid, kick it off...");
			err = ENODEV;
			break;
		}
		err = uiomove(buf[i], buflen[i], uio);
		if (err)
			break;
	}

	FUSE_ASSERT_MS_DONE(tick);
	fuse_ticket_drop(tick);

	return (err);
}

static inline int
fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
{
	if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: body size "
		    "differs from size claimed by header");
		return (EINVAL);
	}
	if (uio->uio_resid && ohead->unique != 0 && ohead->error) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: non zero error but message had a body");
		return (EINVAL);
	}

	return (0);
}
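
/*
 * Reply framing, as enforced by fuse_ohead_audit() above (a summary, not
 * the canonical spec; the structure definitions live in fuse_kernel.h):
 * every write from the daemon starts with a struct fuse_out_header whose
 * 'len' must equal the header size plus the body actually supplied, and a
 * reply that reports an error for a real request (unique != 0) must not
 * carry a body.  The 'unique' field echoes the request being answered;
 * unique == 0 marks an asynchronous notification, which is handled
 * separately in fuse_device_write() below.
 */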

SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_notify,
	"struct fuse_out_header*");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_missing_ticket,
	"uint64_t");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_found,
	"struct fuse_ticket*");
/*
 * fuse_device_write first reads the header sent by the daemon.  If the
 * header checks out, it looks up the ticket/callback node by the unique id
 * seen in the header.  If the callback node contains a handler function,
 * the uio is passed over to that handler.
 */
static int
fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct fuse_out_header ohead;
	int err = 0;
	struct fuse_data *data;
	struct mount *mp;
	struct fuse_ticket *tick, *itick, *x_tick;
	int found = 0;

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);
	mp = data->mp;

	if (uio->uio_resid < sizeof(struct fuse_out_header)) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "fuse_device_write got less than a header!");
		fdata_set_dead(data);
		return (EINVAL);
	}
	if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
		return (err);

	/*
	 * We check header information (which is redundant) and compare it
	 * with what we see.  If we see some inconsistency we discard the
	 * whole answer and proceed as if it had never existed.  In
	 * particular, no pretender will be woken up, regardless of the
	 * "unique" value in the header.
	 */
	if ((err = fuse_ohead_audit(&ohead, uio))) {
		fdata_set_dead(data);
		return (err);
	}
	/* Pass stuff over to callback if there is one installed */

	/* Look for the ticket with the unique id of the header */
	fuse_lck_mtx_lock(data->aw_mtx);
	TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
	    x_tick) {
		if (tick->tk_unique == ohead.unique) {
			SDT_PROBE1(fusefs, , device, fuse_device_write_found,
			    tick);
			found = 1;
			fuse_aw_remove(tick);
			break;
		}
	}
	if (found && tick->irq_unique > 0) {
		/*
		 * Discard the FUSE_INTERRUPT ticket that tried to interrupt
		 * this operation
		 */
		TAILQ_FOREACH_SAFE(itick, &data->aw_head, tk_aw_link,
		    x_tick) {
			if (itick->tk_unique == tick->irq_unique) {
				fuse_aw_remove(itick);
				fuse_ticket_drop(itick);
				break;
			}
		}
		tick->irq_unique = 0;
	}
	fuse_lck_mtx_unlock(data->aw_mtx);

	if (found) {
		if (tick->tk_aw_handler) {
			/*
			 * We found a callback with a proper handler.  In
			 * this case the out header is owned by the callback,
			 * so freeing it is left to the callback as well.
			 * (In practice that happens via fuse_ticket_drop(),
			 * so no manual cleanup is needed.)
			 */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "pass ticket to a callback");
			/* Sanitize the linuxism of negative errnos */
			ohead.error *= -1;
			memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
			err = tick->tk_aw_handler(tick, uio);
		} else {
			/*
			 * The pretender doesn't want to do anything with the
			 * answer.
			 */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "stuff devalidated, so we drop it");
		}

		/*
		 * As aw_mtx was not held during the callback execution the
		 * ticket may have been inserted again.  However, this is safe
		 * because fuse_ticket_drop() will deal with the refcount
		 * anyway.
		 */
		fuse_ticket_drop(tick);
	} else if (ohead.unique == 0) {
		/* unique == 0 means asynchronous notification */
		SDT_PROBE1(fusefs, , device, fuse_device_write_notify, &ohead);
		switch (ohead.error) {
		case FUSE_NOTIFY_INVAL_ENTRY:
			err = fuse_internal_invalidate_entry(mp, uio);
			break;
		case FUSE_NOTIFY_INVAL_INODE:
			err = fuse_internal_invalidate_inode(mp, uio);
			break;
		case FUSE_NOTIFY_RETRIEVE:
		case FUSE_NOTIFY_STORE:
			/*
			 * Unimplemented.  I don't know of any file systems
			 * that use them, and the protocol isn't sound anyway,
			 * since the notification messages don't include the
			 * inode's generation number.  Without that, it's
			 * possible to manipulate the cache of the wrong vnode.
			 * Finally, it's not defined what this message should
			 * do for a file with a dirty cache.
			 */
		case FUSE_NOTIFY_POLL:
			/* Unimplemented.  See comments in fuse_vnops. */
		default:
			/* Not implemented */
			err = ENOSYS;
		}
	} else {
		/* no callback at all! */
		SDT_PROBE1(fusefs, , device, fuse_device_write_missing_ticket,
		    ohead.unique);
		if (ohead.error == -EAGAIN) {
			/*
			 * This was probably a response to a FUSE_INTERRUPT
			 * operation whose original operation is already
			 * complete.  We can't store FUSE_INTERRUPT tickets
			 * indefinitely because their responses are optional.
			 * So we delete them when the original operation
			 * completes.  And sadly the fuse_out_header doesn't
			 * identify the opcode, so we have to guess.
			 */
			err = 0;
		} else {
			err = EINVAL;
		}
	}

	return (err);
}
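
/*
 * Illustrative only, a sketch rather than normative protocol documentation:
 * the smallest well-formed reply a daemon can write is a bare
 * fuse_out_header carrying a (Linux-convention, negative) errno and the
 * unique id copied from the matching request ('req_unique' below is just a
 * placeholder for that value):
 *
 *	struct fuse_out_header oh;
 *	oh.len = sizeof(oh);		// no body
 *	oh.error = -ENOSYS;		// negated errno, per the protocol
 *	oh.unique = req_unique;		// copied from the request's header
 *	write(fd, &oh, sizeof(oh));
 *
 * fuse_device_write() above flips the sign of 'error' before handing the
 * result to the waiting ticket's callback.
 */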

int
fuse_device_init(void)
{

	fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, "fuse");
	if (fuse_dev == NULL)
		return (ENOMEM);
	return (0);
}

void
fuse_device_destroy(void)
{

	MPASS(fuse_dev != NULL);
	destroy_dev(fuse_dev);
}
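
/*
 * An editorial note on the two functions above: fuse_device_init() and
 * fuse_device_destroy() create and remove the single /dev/fuse node.  The
 * 0666 permissions allow unprivileged daemons to open the device; whether a
 * given user may actually mount is enforced elsewhere.  They are expected to
 * be called from the driver's module load and unload paths, respectively.
 */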