/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * Portions of this software were developed by BFF Storage Systems, LLC under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/sdt.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include "fuse.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"

#include <compat/linux/linux_errno.h>
#include <compat/linux/linux_errno.inc>

SDT_PROVIDER_DECLARE(fusefs);
/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages.
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , device, trace, "int", "char*");

static struct cdev *fuse_dev;

static d_kqfilter_t fuse_device_filter;
static d_open_t fuse_device_open;
static d_poll_t fuse_device_poll;
static d_read_t fuse_device_read;
static d_write_t fuse_device_write;

static struct cdevsw fuse_device_cdevsw = {
	.d_kqfilter = fuse_device_filter,
	.d_open = fuse_device_open,
	.d_name = "fuse",
	.d_poll = fuse_device_poll,
	.d_read = fuse_device_read,
	.d_write = fuse_device_write,
	.d_version = D_VERSION,
};

static int fuse_device_filt_read(struct knote *kn, long hint);
static void fuse_device_filt_detach(struct knote *kn);

struct filterops fuse_device_rfiltops = {
	.f_isfd = 1,
	.f_detach = fuse_device_filt_detach,
	.f_event = fuse_device_filt_read,
};

/****************************
 *
 * >>> Fuse device op defs
 *
 ****************************/

static void
fdata_dtor(void *arg)
{
	struct fuse_data *fdata;
	struct fuse_ticket *tick;

	fdata = arg;
	if (fdata == NULL)
		return;

	fdata_set_dead(fdata);

	FUSE_LOCK();
	fuse_lck_mtx_lock(fdata->aw_mtx);
	/* wake up poll()ers */
	selwakeuppri(&fdata->ks_rsel, PZERO + 1);
	/* Don't let syscall handlers wait in vain */
	while ((tick = fuse_aw_pop(fdata))) {
		fuse_lck_mtx_lock(tick->tk_aw_mtx);
		fticket_set_answered(tick);
		tick->tk_aw_errno = ENOTCONN;
		wakeup(tick);
		fuse_lck_mtx_unlock(tick->tk_aw_mtx);
		FUSE_ASSERT_AW_DONE(tick);
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->aw_mtx);

	/* Clean up unsent operations */
	fuse_lck_mtx_lock(fdata->ms_mtx);
	while ((tick = fuse_ms_pop(fdata))) {
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->ms_mtx);
	FUSE_UNLOCK();

	fdata_trydestroy(fdata);
}

static int
fuse_device_filter(struct cdev *dev, struct knote *kn)
{
	struct fuse_data *data;
	int error;

	error = devfs_get_cdevpriv((void **)&data);

	/* EVFILT_WRITE is not supported; the device is always ready to write */
	if (error == 0 && kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &fuse_device_rfiltops;
		kn->kn_hook = data;
		knlist_add(&data->ks_rsel.si_note, kn, 0);
		error = 0;
	} else if (error == 0) {
		error = EINVAL;
		kn->kn_data = error;
	}

	return (error);
}

static void
fuse_device_filt_detach(struct knote *kn)
{
	struct fuse_data *data;

	data = (struct fuse_data *)kn->kn_hook;
	MPASS(data != NULL);
	knlist_remove(&data->ks_rsel.si_note, kn, 0);
	kn->kn_hook = NULL;
}

static int
fuse_device_filt_read(struct knote *kn, long hint)
{
	struct fuse_data *data;
	int ready;

	data = (struct fuse_data *)kn->kn_hook;
	MPASS(data != NULL);

	mtx_assert(&data->ms_mtx, MA_OWNED);
	if (fdata_get_dead(data)) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = ENODEV;
		kn->kn_data = 1;
		ready = 1;
	} else if (STAILQ_FIRST(&data->ms_head)) {
		MPASS(data->ms_count >= 1);
		kn->kn_data = data->ms_count;
		ready = 1;
	} else {
		ready = 0;
	}

	return (ready);
}

/*
 * Resources are set up on a per-open basis
 */
static int
fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct fuse_data *fdata;
	int error;

	SDT_PROBE2(fusefs, , device, trace, 1, "device open");

	fdata = fdata_alloc(dev, td->td_ucred);
	error = devfs_set_cdevpriv(fdata, fdata_dtor);
	if (error != 0)
		fdata_trydestroy(fdata);
	else
		SDT_PROBE2(fusefs, , device, trace, 1, "device open success");
	return (error);
}

int
fuse_device_poll(struct cdev *dev, int events, struct thread *td)
{
	struct fuse_data *data;
	int error, revents = 0;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (events &
		    (POLLHUP | POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM));

	if (events & (POLLIN | POLLRDNORM)) {
		fuse_lck_mtx_lock(data->ms_mtx);
		if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &data->ks_rsel);
		fuse_lck_mtx_unlock(data->ms_mtx);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		revents |= events & (POLLOUT | POLLWRNORM);
	}
	return (revents);
}

/*
 * fuse_device_read waits on the queue of VFS messages.
 * When it is notified that a new one has arrived, it picks that message up
 * and passes it to the daemon.
 */
int
fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int err;
	struct fuse_data *data;
	struct fuse_ticket *tick;
	void *buf[] = {NULL, NULL, NULL};
	int buflen[3];
	int i;

	SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read");

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);

	fuse_lck_mtx_lock(data->ms_mtx);
again:
	if (fdata_get_dead(data)) {
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "we know early on that reader should be kicked so we "
		    "don't wait for news");
		fuse_lck_mtx_unlock(data->ms_mtx);
		return (ENODEV);
	}
	if (!(tick = fuse_ms_pop(data))) {
		/* check whether we may block */
		if (ioflag & O_NONBLOCK) {
			/* get out of here promptly */
			fuse_lck_mtx_unlock(data->ms_mtx);
			return (EAGAIN);
		} else {
			err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
			if (err != 0) {
				fuse_lck_mtx_unlock(data->ms_mtx);
				return (fdata_get_dead(data) ? ENODEV : err);
			}
			tick = fuse_ms_pop(data);
		}
	}
	if (!tick) {
		/*
		 * We can get here if the fuse daemon suddenly terminates,
		 * e.g. by being hit by a SIGKILL
		 * -- and in some other cases, too, though it is not totally
		 * clear when (does cv_signal/wakeup_one wake the whole
		 * process?)
		 */
		SDT_PROBE2(fusefs, , device, trace, 1, "no message on thread");
		goto again;
	}
	fuse_lck_mtx_unlock(data->ms_mtx);

	if (fdata_get_dead(data)) {
		/*
		 * Somebody somewhere -- e.g. the umount routine --
		 * wants this liaison finished off.
		 */
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "reader is to be sacked");
		if (tick) {
			SDT_PROBE2(fusefs, , device, trace, 2, "weird -- "
			    "\"kick\" is set though there is a message");
			FUSE_ASSERT_MS_DONE(tick);
			fuse_ticket_drop(tick);
		}
		return (ENODEV);	/* This should make the daemon get off
					 * of us */
	}
	SDT_PROBE2(fusefs, , device, trace, 1,
	    "fuse device read message successfully");

	KASSERT(tick->tk_ms_bufdata || tick->tk_ms_bufsize == 0,
	    ("non-null buf pointer with positive size"));

	switch (tick->tk_ms_type) {
	case FT_M_FIOV:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		break;
	case FT_M_BUF:
		buf[0] = tick->tk_ms_fiov.base;
		buflen[0] = tick->tk_ms_fiov.len;
		buf[1] = tick->tk_ms_bufdata;
		buflen[1] = tick->tk_ms_bufsize;
		break;
	default:
		panic("unknown message type for fuse_ticket %p", tick);
	}

	for (i = 0; buf[i]; i++) {
		/*
		 * Why not ban mercilessly stupid daemons who can't keep up
		 * with us?  (There is not much use for a partial read here...)
		 */
		/*
		 * XXX note that in such cases Linux FUSE throws EIO at the
		 * syscall invoker and goes back to the message queue.  The
		 * rationale should be made clear (and possibly that behaviour
		 * should be adopted).  Keeping the current scheme at least
		 * makes the fallacy as loud as possible...
		 */
		if (uio->uio_resid < buflen[i]) {
			fdata_set_dead(data);
			SDT_PROBE2(fusefs, , device, trace, 2,
			    "daemon is stupid, kick it off...");
			err = ENODEV;
			break;
		}
		err = uiomove(buf[i], buflen[i], uio);
		if (err)
			break;
	}

	FUSE_ASSERT_MS_DONE(tick);
	fuse_ticket_drop(tick);

	return (err);
}

static inline int
fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
{
	if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: body size "
		    "differs from size claimed by header");
		return (EINVAL);
	}
	if (uio->uio_resid && ohead->unique != 0 && ohead->error) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: non-zero error but message had a body");
		return (EINVAL);
	}

	return (0);
}

SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_notify,
    "struct fuse_out_header*");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_missing_ticket,
    "uint64_t");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_found,
    "struct fuse_ticket*");
/*
 * fuse_device_write first reads the header sent by the daemon.
 * If that's OK, it looks up the ticket/callback node by the unique id seen
 * in the header.  If the callback node contains a handler function, the uio
 * is passed over to that handler.
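 *
 * For reference, the reply header written by the daemon has the layout
 * defined by the FUSE protocol (shown here only as an illustration; the
 * authoritative definition lives in the protocol header):
 *
 *	struct fuse_out_header {
 *		uint32_t len;		total reply size, header plus body
 *		int32_t  error;		0 or a negative (Linux-style) errno
 *		uint64_t unique;	copied from the request; 0 marks an
 *					asynchronous notification
 *	};
 *
 * fuse_ohead_audit() above checks exactly these invariants: len must agree
 * with the amount of data written, and an error reply must not carry a body.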
 */
static int
fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct fuse_out_header ohead;
	int err = 0;
	struct fuse_data *data;
	struct mount *mp;
	struct fuse_ticket *tick, *itick, *x_tick;
	int found = 0;

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);
	mp = data->mp;

	if (uio->uio_resid < sizeof(struct fuse_out_header)) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "fuse_device_write got less than a header!");
		fdata_set_dead(data);
		return (EINVAL);
	}
	if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
		return (err);

	if (data->linux_errnos != 0 && ohead.error != 0) {
		err = -ohead.error;
		if (err < 0 || err >= nitems(linux_to_bsd_errtbl))
			return (EINVAL);

		/* '-', because it will get flipped again below */
		ohead.error = -linux_to_bsd_errtbl[err];
	}

	/*
	 * We check the header information (which is redundant) and compare
	 * it with what we see.  If we see some inconsistency we discard the
	 * whole answer and proceed as if it had never existed.  In
	 * particular, no pretender will be woken up, regardless of the
	 * "unique" value in the header.
	 */
	if ((err = fuse_ohead_audit(&ohead, uio))) {
		fdata_set_dead(data);
		return (err);
	}
	/* Pass stuff over to the callback if there is one installed */

	/* Look for the ticket with the unique id of the header */
	fuse_lck_mtx_lock(data->aw_mtx);
	TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
	    x_tick) {
		if (tick->tk_unique == ohead.unique) {
			SDT_PROBE1(fusefs, , device, fuse_device_write_found,
			    tick);
			found = 1;
			fuse_aw_remove(tick);
			break;
		}
	}
	if (found && tick->irq_unique > 0) {
		/*
		 * Discard the FUSE_INTERRUPT ticket that tried to interrupt
		 * this operation
		 */
		TAILQ_FOREACH_SAFE(itick, &data->aw_head, tk_aw_link,
		    x_tick) {
			if (itick->tk_unique == tick->irq_unique) {
				fuse_aw_remove(itick);
				fuse_ticket_drop(itick);
				break;
			}
		}
		tick->irq_unique = 0;
	}
	fuse_lck_mtx_unlock(data->aw_mtx);

	if (found) {
		if (tick->tk_aw_handler) {
			/*
			 * We found a callback with a proper handler.  In this
			 * case the out header will be owned by the callback,
			 * so freeing it is the callback's job.  (Most likely
			 * that happens via fuse_ticket_drop(), so no manual
			 * mucking around is needed...)
			 */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "pass ticket to a callback");
			/* Sanitize the linuxism of negative errnos */
			ohead.error *= -1;
			memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
			err = tick->tk_aw_handler(tick, uio);
		} else {
			/* The pretender doesn't want to do anything with the answer */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "stuff devalidated, so we drop it");
		}

		/*
		 * As aw_mtx was not held during the callback execution the
		 * ticket may have been inserted again.  However, this is safe
		 * because fuse_ticket_drop() will deal with the refcount
		 * anyway.
		 */
		fuse_ticket_drop(tick);
	} else if (ohead.unique == 0) {
		/* unique == 0 means asynchronous notification */
		SDT_PROBE1(fusefs, , device, fuse_device_write_notify, &ohead);
		switch (ohead.error) {
		case FUSE_NOTIFY_INVAL_ENTRY:
			err = fuse_internal_invalidate_entry(mp, uio);
			break;
		case FUSE_NOTIFY_INVAL_INODE:
			err = fuse_internal_invalidate_inode(mp, uio);
			break;
		case FUSE_NOTIFY_RETRIEVE:
		case FUSE_NOTIFY_STORE:
			/*
			 * Unimplemented.  I don't know of any file systems
			 * that use them, and the protocol isn't sound anyway,
			 * since the notification messages don't include the
			 * inode's generation number.  Without that, it's
			 * possible to manipulate the cache of the wrong vnode.
			 * Finally, it's not defined what this message should
			 * do for a file with a dirty cache.
			 */
		case FUSE_NOTIFY_POLL:
			/* Unimplemented.  See comments in fuse_vnops. */
		default:
			/* Not implemented */
			err = ENOSYS;
		}
	} else {
		/* no callback at all! */
		SDT_PROBE1(fusefs, , device, fuse_device_write_missing_ticket,
		    ohead.unique);
		if (ohead.error == -EAGAIN) {
			/*
			 * This was probably a response to a FUSE_INTERRUPT
			 * operation whose original operation is already
			 * complete.  We can't store FUSE_INTERRUPT tickets
			 * indefinitely because their responses are optional.
			 * So we delete them when the original operation
			 * completes.  And sadly the fuse_out_header doesn't
			 * identify the opcode, so we have to guess.
			 */
			err = 0;
		} else {
			err = EINVAL;
		}
	}

	return (err);
}

int
fuse_device_init(void)
{

	fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, "fuse");
	if (fuse_dev == NULL)
		return (ENOMEM);
	return (0);
}

void
fuse_device_destroy(void)
{

	MPASS(fuse_dev != NULL);
	destroy_dev(fuse_dev);
}
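
/*
 * Illustrative sketch of the daemon side of this device protocol; it is not
 * part of the kernel, and the variable names and dispatch logic below are
 * hypothetical.  A FUSE daemon opens /dev/fuse, hands the descriptor to
 * mount_fusefs(8), and then loops: each read() returns one whole request,
 * and each write() must supply one whole reply whose "unique" field matches
 * the request (see fuse_device_read() and fuse_device_write() above).
 *
 *	int fd = open("/dev/fuse", O_RDWR);
 *	// ... mount the file system, passing fd to mount_fusefs ...
 *	char buf[FUSE_MIN_READ_BUFFER];		// protocol minimum; real
 *						// daemons size this larger
 *	for (;;) {
 *		ssize_t n = read(fd, buf, sizeof(buf));
 *		if (n <= 0)
 *			break;			// e.g. ENODEV after unmount
 *		struct fuse_in_header *in = (struct fuse_in_header *)buf;
 *		// ... dispatch on in->opcode and build a reply body ...
 *		struct fuse_out_header out = {
 *			.len = sizeof(out),	// plus any body that follows
 *			.error = -ENOSYS,	// negative, Linux-style errno
 *			.unique = in->unique,	// must match the request
 *		};
 *		write(fd, &out, sizeof(out));
 *	}
 */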