/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following disclaimer
 *   in the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Google Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from
 *   this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Copyright (C) 2005 Csaba Henk.
 * All rights reserved.
 *
 * Copyright (c) 2019 The FreeBSD Foundation
 *
 * Portions of this software were developed by BFF Storage Systems, LLC under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/sdt.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/sysctl.h>
#include <sys/poll.h>
#include <sys/selinfo.h>

#include "fuse.h"
#include "fuse_internal.h"
#include "fuse_ipc.h"

#include <compat/linux/linux_errno.h>
#include <compat/linux/linux_errno.inc>

SDT_PROVIDER_DECLARE(fusefs);
/*
 * Fuse trace probe:
 * arg0: verbosity.  Higher numbers give more verbose messages
 * arg1: Textual message
 */
SDT_PROBE_DEFINE2(fusefs, , device, trace, "int", "char*");

static struct cdev *fuse_dev;

static d_kqfilter_t fuse_device_filter;
static d_open_t fuse_device_open;
static d_poll_t fuse_device_poll;
static d_read_t fuse_device_read;
static d_write_t fuse_device_write;

static struct cdevsw fuse_device_cdevsw = {
	.d_kqfilter = fuse_device_filter,
	.d_open = fuse_device_open,
	.d_name = "fuse",
	.d_poll = fuse_device_poll,
	.d_read = fuse_device_read,
	.d_write = fuse_device_write,
	.d_version = D_VERSION,
};

static int fuse_device_filt_read(struct knote *kn, long hint);
static void fuse_device_filt_detach(struct knote *kn);

struct filterops fuse_device_rfiltops = {
	.f_isfd = 1,
	.f_detach = fuse_device_filt_detach,
	.f_event = fuse_device_filt_read,
};

/****************************
 *
 * >>> Fuse device op defs
 *
 ****************************/

static void
fdata_dtor(void *arg)
{
	struct fuse_data *fdata;
	struct fuse_ticket *tick;

	fdata = arg;
	if (fdata == NULL)
		return;

	fdata_set_dead(fdata);

	FUSE_LOCK();
	fuse_lck_mtx_lock(fdata->aw_mtx);
	/* wakeup poll()ers */
	selwakeuppri(&fdata->ks_rsel, PZERO + 1);
	/* Don't let syscall handlers wait in vain */
	while ((tick = fuse_aw_pop(fdata))) {
		fuse_lck_mtx_lock(tick->tk_aw_mtx);
		fticket_set_answered(tick);
		tick->tk_aw_errno = ENOTCONN;
		wakeup(tick);
		fuse_lck_mtx_unlock(tick->tk_aw_mtx);
		FUSE_ASSERT_AW_DONE(tick);
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->aw_mtx);

	/* Cleanup unsent operations */
	fuse_lck_mtx_lock(fdata->ms_mtx);
	while ((tick = fuse_ms_pop(fdata))) {
		fuse_ticket_drop(tick);
	}
	fuse_lck_mtx_unlock(fdata->ms_mtx);
	FUSE_UNLOCK();

	fdata_trydestroy(fdata);
}

static int
fuse_device_filter(struct cdev *dev, struct knote *kn)
{
	struct fuse_data *data;
	int error;

	error = devfs_get_cdevpriv((void **)&data);

	/* EVFILT_WRITE is not supported; the device is always ready to write */
	if (error == 0 && kn->kn_filter == EVFILT_READ) {
		kn->kn_fop = &fuse_device_rfiltops;
		kn->kn_hook = data;
		knlist_add(&data->ks_rsel.si_note, kn, 0);
		error = 0;
	} else if (error == 0) {
		error = EINVAL;
		kn->kn_data = error;
	}

	return (error);
}
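
/*
 * Illustrative sketch (not part of the driver): only EVFILT_READ is accepted
 * above, so a userspace daemon that wants event-driven dispatch would
 * typically register its /dev/fuse descriptor with kqueue(2) roughly like
 * this (fuse_fd and kq are hypothetical variables):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fuse_fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * When the event fires, kev.data holds the number of queued messages (see
 * fuse_device_filt_read() below).
 */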

static void
fuse_device_filt_detach(struct knote *kn)
{
	struct fuse_data *data;

	data = (struct fuse_data*)kn->kn_hook;
	MPASS(data != NULL);
	knlist_remove(&data->ks_rsel.si_note, kn, 0);
	kn->kn_hook = NULL;
}

static int
fuse_device_filt_read(struct knote *kn, long hint)
{
	struct fuse_data *data;
	int ready;

	data = (struct fuse_data*)kn->kn_hook;
	MPASS(data != NULL);

	mtx_assert(&data->ms_mtx, MA_OWNED);
	if (fdata_get_dead(data)) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = ENODEV;
		kn->kn_data = 1;
		ready = 1;
	} else if (STAILQ_FIRST(&data->ms_head)) {
		MPASS(data->ms_count >= 1);
		kn->kn_data = data->ms_count;
		ready = 1;
	} else {
		ready = 0;
	}

	return (ready);
}

/*
 * Resources are set up on a per-open basis
 */
static int
fuse_device_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct fuse_data *fdata;
	int error;

	SDT_PROBE2(fusefs, , device, trace, 1, "device open");

	fdata = fdata_alloc(dev, td->td_ucred);
	error = devfs_set_cdevpriv(fdata, fdata_dtor);
	if (error != 0)
		fdata_trydestroy(fdata);
	else
		SDT_PROBE2(fusefs, , device, trace, 1, "device open success");
	return (error);
}

int
fuse_device_poll(struct cdev *dev, int events, struct thread *td)
{
	struct fuse_data *data;
	int error, revents = 0;

	error = devfs_get_cdevpriv((void **)&data);
	if (error != 0)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	if (events & (POLLIN | POLLRDNORM)) {
		fuse_lck_mtx_lock(data->ms_mtx);
		if (fdata_get_dead(data) || STAILQ_FIRST(&data->ms_head))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &data->ks_rsel);
		fuse_lck_mtx_unlock(data->ms_mtx);
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		revents |= events & (POLLOUT | POLLWRNORM);
	}
	return (revents);
}
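
/*
 * Illustrative sketch (not part of the driver): because fuse_device_read()
 * below refuses partial reads -- a read buffer smaller than the queued
 * message kills the connection with ENODEV -- a daemon's receive loop has to
 * fetch each request with a single read(2) into a buffer large enough for
 * the biggest possible message (header plus the negotiated write size).
 * Hypothetical userspace loop, with bufsz chosen at FUSE_INIT time:
 *
 *	char *buf = malloc(bufsz);
 *
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, bufsz);
 *		if (n < 0 && errno == ENODEV)
 *			break;			// unmounted or connection dead
 *		if (n > 0)
 *			dispatch_request(buf, n);	// hypothetical handler
 *	}
 */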

/*
 * fuse_device_read hangs on the queue of VFS messages.
 * When it's notified that there is a new one, it picks that up and
 * passes it to the daemon.
 */
int
fuse_device_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int err;
	struct fuse_data *data;
	struct fuse_ticket *tick;
	void *buf;
	int buflen;

	SDT_PROBE2(fusefs, , device, trace, 1, "fuse device read");

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);

	fuse_lck_mtx_lock(data->ms_mtx);
again:
	if (fdata_get_dead(data)) {
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "we know early on that reader should be kicked so we "
		    "don't wait for news");
		fuse_lck_mtx_unlock(data->ms_mtx);
		return (ENODEV);
	}
	if (!(tick = fuse_ms_pop(data))) {
		/* check if we may block */
		if (ioflag & O_NONBLOCK) {
			/* get outa here soon */
			fuse_lck_mtx_unlock(data->ms_mtx);
			return (EAGAIN);
		} else {
			err = msleep(data, &data->ms_mtx, PCATCH, "fu_msg", 0);
			if (err != 0) {
				fuse_lck_mtx_unlock(data->ms_mtx);
				return (fdata_get_dead(data) ? ENODEV : err);
			}
			tick = fuse_ms_pop(data);
		}
	}
	if (!tick) {
		/*
		 * We can get here if the fuse daemon suddenly terminates,
		 * e.g. by being hit by a SIGKILL
		 * -- and some other cases, too, though it's not totally clear
		 * when (cv_signal/wakeup_one signals the whole process?)
		 */
		SDT_PROBE2(fusefs, , device, trace, 1, "no message on thread");
		goto again;
	}
	fuse_lck_mtx_unlock(data->ms_mtx);

	if (fdata_get_dead(data)) {
		/*
		 * Somebody somewhere -- e.g., the umount routine --
		 * wants this liaison finished off
		 */
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "reader is to be sacked");
		if (tick) {
			SDT_PROBE2(fusefs, , device, trace, 2, "weird -- "
			    "\"kick\" is set tho there is message");
			FUSE_ASSERT_MS_DONE(tick);
			fuse_ticket_drop(tick);
		}
		return (ENODEV);	/* This should make the daemon get off
					 * of us */
	}
	SDT_PROBE2(fusefs, , device, trace, 1,
	    "fuse device read message successfully");

	buf = tick->tk_ms_fiov.base;
	buflen = tick->tk_ms_fiov.len;

	/*
	 * Why not ban mercilessly stupid daemons who can't keep up
	 * with us?  (There is not much use of a partial read here...)
	 */
	/*
	 * XXX note that in such cases Linux FUSE throws EIO at the
	 * syscall invoker and stands back to the message queue.  The
	 * rationale should be made clear (and possibly adopt that
	 * behaviour).  Keeping the current scheme at least makes the
	 * fallacy as loud as possible...
	 */
	if (uio->uio_resid < buflen) {
		fdata_set_dead(data);
		SDT_PROBE2(fusefs, , device, trace, 2,
		    "daemon is stupid, kick it off...");
		err = ENODEV;
	} else {
		err = uiomove(buf, buflen, uio);
	}

	FUSE_ASSERT_MS_DONE(tick);
	fuse_ticket_drop(tick);

	return (err);
}

static inline int
fuse_ohead_audit(struct fuse_out_header *ohead, struct uio *uio)
{
	if (uio->uio_resid + sizeof(struct fuse_out_header) != ohead->len) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: body size "
		    "differs from size claimed by header");
		return (EINVAL);
	}
	if (uio->uio_resid && ohead->unique != 0 && ohead->error) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "Format error: non zero error but message had a body");
		return (EINVAL);
	}

	return (0);
}
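
/*
 * Illustrative sketch (not part of the driver): fuse_ohead_audit() above
 * expects each reply from the daemon to be one write(2) consisting of a
 * struct fuse_out_header (len, error, unique in the standard FUSE layout)
 * immediately followed by the optional body, with ohead.len covering both
 * and a non-zero error implying an empty body.  A hypothetical daemon-side
 * reply failing a request with ENOENT would look roughly like:
 *
 *	struct fuse_out_header ohead;
 *
 *	ohead.unique = request_unique;	// copied from the matching request
 *	ohead.error = -ENOENT;		// negated errno, no body follows
 *	ohead.len = sizeof(ohead);
 *	write(fuse_fd, &ohead, sizeof(ohead));
 *
 * Successful replies set error to 0 and append the opcode-specific body,
 * bumping len accordingly.
 */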

SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_notify,
    "struct fuse_out_header*");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_missing_ticket,
    "uint64_t");
SDT_PROBE_DEFINE1(fusefs, , device, fuse_device_write_found,
    "struct fuse_ticket*");
/*
 * fuse_device_write first reads the header sent by the daemon.
 * If that's OK, it looks up the ticket/callback node by the unique id seen
 * in the header.  If the callback node contains a handler function, the uio
 * is passed over to that handler.
 */
static int
fuse_device_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct fuse_out_header ohead;
	int err = 0;
	struct fuse_data *data;
	struct mount *mp;
	struct fuse_ticket *tick, *itick, *x_tick;
	int found = 0;

	err = devfs_get_cdevpriv((void **)&data);
	if (err != 0)
		return (err);
	mp = data->mp;

	if (uio->uio_resid < sizeof(struct fuse_out_header)) {
		SDT_PROBE2(fusefs, , device, trace, 1,
		    "fuse_device_write got less than a header!");
		fdata_set_dead(data);
		return (EINVAL);
	}
	if ((err = uiomove(&ohead, sizeof(struct fuse_out_header), uio)) != 0)
		return (err);

	if (data->linux_errnos != 0 && ohead.error != 0) {
		err = -ohead.error;
		if (err < 0 || err >= nitems(linux_to_bsd_errtbl))
			return (EINVAL);

		/* '-', because it will get flipped again below */
		ohead.error = -linux_to_bsd_errtbl[err];
	}

	/*
	 * We check header information (which is redundant) and compare it
	 * with what we see.  If we see some inconsistency we discard the
	 * whole answer and proceed on as if it had never existed.  In
	 * particular, no pretender will be woken up, regardless of the
	 * "unique" value in the header.
	 */
	if ((err = fuse_ohead_audit(&ohead, uio))) {
		fdata_set_dead(data);
		return (err);
	}
	/* Pass stuff over to callback if there is one installed */

	/* Looking for ticket with the unique id of header */
	fuse_lck_mtx_lock(data->aw_mtx);
	TAILQ_FOREACH_SAFE(tick, &data->aw_head, tk_aw_link,
	    x_tick) {
		if (tick->tk_unique == ohead.unique) {
			SDT_PROBE1(fusefs, , device, fuse_device_write_found,
			    tick);
			found = 1;
			fuse_aw_remove(tick);
			break;
		}
	}
	if (found && tick->irq_unique > 0) {
		/*
		 * Discard the FUSE_INTERRUPT ticket that tried to interrupt
		 * this operation
		 */
		TAILQ_FOREACH_SAFE(itick, &data->aw_head, tk_aw_link,
		    x_tick) {
			if (itick->tk_unique == tick->irq_unique) {
				fuse_aw_remove(itick);
				fuse_ticket_drop(itick);
				break;
			}
		}
		tick->irq_unique = 0;
	}
	fuse_lck_mtx_unlock(data->aw_mtx);

	if (found) {
		if (tick->tk_aw_handler) {
			/*
			 * We found a callback with a proper handler.  In this
			 * case the out header will be owned by the callback,
			 * so freeing it is left to the callback.  (In all
			 * likelihood that simply happens via
			 * fuse_ticket_drop(), so no manual mucking
			 * around...)
			 */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "pass ticket to a callback");
			/* Sanitize the linuxism of negative errnos */
			ohead.error *= -1;
			memcpy(&tick->tk_aw_ohead, &ohead, sizeof(ohead));
			err = tick->tk_aw_handler(tick, uio);
		} else {
			/* pretender doesn't want to do anything with answer */
			SDT_PROBE2(fusefs, , device, trace, 1,
			    "stuff devalidated, so we drop it");
		}

		/*
		 * As aw_mtx was not held during the callback execution the
		 * ticket may have been inserted again.  However, this is safe
		 * because fuse_ticket_drop() will deal with the refcount
		 * anyway.
		 */
		fuse_ticket_drop(tick);
	} else if (ohead.unique == 0) {
		/* unique == 0 means asynchronous notification */
		SDT_PROBE1(fusefs, , device, fuse_device_write_notify, &ohead);
		switch (ohead.error) {
		case FUSE_NOTIFY_INVAL_ENTRY:
			err = fuse_internal_invalidate_entry(mp, uio);
			break;
		case FUSE_NOTIFY_INVAL_INODE:
			err = fuse_internal_invalidate_inode(mp, uio);
			break;
		case FUSE_NOTIFY_RETRIEVE:
		case FUSE_NOTIFY_STORE:
			/*
			 * Unimplemented.  I don't know of any file systems
			 * that use them, and the protocol isn't sound anyway,
			 * since the notification messages don't include the
			 * inode's generation number.  Without that, it's
			 * possible to manipulate the cache of the wrong vnode.
			 * Finally, it's not defined what this message should
			 * do for a file with a dirty cache.
			 */
		case FUSE_NOTIFY_POLL:
			/* Unimplemented.  See comments in fuse_vnops */
		default:
			/* Not implemented */
			err = ENOSYS;
		}
	} else {
		/* no callback at all! */
		SDT_PROBE1(fusefs, , device, fuse_device_write_missing_ticket,
		    ohead.unique);
		if (ohead.error == -EAGAIN) {
			/*
			 * This was probably a response to a FUSE_INTERRUPT
			 * operation whose original operation is already
			 * complete.  We can't store FUSE_INTERRUPT tickets
			 * indefinitely because their responses are optional.
			 * So we delete them when the original operation
			 * completes.  And sadly the fuse_header_out doesn't
			 * identify the opcode, so we have to guess.
			 */
			err = 0;
		} else {
			err = EINVAL;
		}
	}

	return (err);
}
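
/*
 * Illustrative sketch (not part of the driver): as handled in
 * fuse_device_write() above, an asynchronous notification is a "reply"
 * whose unique field is zero and whose error field carries the
 * FUSE_NOTIFY_* code instead of an errno.  Assuming the standard protocol
 * structs, a daemon invalidating a cached directory entry would write
 * something like:
 *
 *	struct fuse_out_header ohead;
 *	struct fuse_notify_inval_entry_out inval;
 *
 *	ohead.unique = 0;
 *	ohead.error = FUSE_NOTIFY_INVAL_ENTRY;
 *	// ohead.len spans the header, the inval struct, and the entry name,
 *	// all sent in one write; see fuse_internal_invalidate_entry() for
 *	// how the kernel side parses it.
 *
 * The exact body layout is defined by the FUSE protocol headers, not here.
 */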

int
fuse_device_init(void)
{

	fuse_dev = make_dev(&fuse_device_cdevsw, 0, UID_ROOT, GID_OPERATOR,
	    S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH, "fuse");
	if (fuse_dev == NULL)
		return (ENOMEM);
	return (0);
}

void
fuse_device_destroy(void)
{

	MPASS(fuse_dev != NULL);
	destroy_dev(fuse_dev);
}