xref: /freebsd/sys/fs/fuse/fuse_io.c (revision f8ebf1cd7efa5a3586dd3250da619e30aac6ae92)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2007-2009 Google Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are
9  * met:
10  *
11  * * Redistributions of source code must retain the above copyright
12  *   notice, this list of conditions and the following disclaimer.
13  * * Redistributions in binary form must reproduce the above
14  *   copyright notice, this list of conditions and the following disclaimer
15  *   in the documentation and/or other materials provided with the
16  *   distribution.
17  * * Neither the name of Google Inc. nor the names of its
18  *   contributors may be used to endorse or promote products derived from
19  *   this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Copyright (C) 2005 Csaba Henk.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  * 1. Redistributions of source code must retain the above copyright
40  *    notice, this list of conditions and the following disclaimer.
41  * 2. Redistributions in binary form must reproduce the above copyright
42  *    notice, this list of conditions and the following disclaimer in the
43  *    documentation and/or other materials provided with the distribution.
44  *
45  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55  * SUCH DAMAGE.
56  */
57 
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
60 
61 #include <sys/types.h>
62 #include <sys/module.h>
63 #include <sys/systm.h>
64 #include <sys/errno.h>
65 #include <sys/param.h>
66 #include <sys/kernel.h>
67 #include <sys/conf.h>
68 #include <sys/uio.h>
69 #include <sys/malloc.h>
70 #include <sys/queue.h>
71 #include <sys/lock.h>
72 #include <sys/sx.h>
73 #include <sys/mutex.h>
74 #include <sys/rwlock.h>
75 #include <sys/priv.h>
76 #include <sys/proc.h>
77 #include <sys/mount.h>
78 #include <sys/vnode.h>
79 #include <sys/stat.h>
80 #include <sys/unistd.h>
81 #include <sys/filedesc.h>
82 #include <sys/file.h>
83 #include <sys/fcntl.h>
84 #include <sys/bio.h>
85 #include <sys/buf.h>
86 #include <sys/sysctl.h>
87 #include <sys/vmmeter.h>
88 
89 #include <vm/vm.h>
90 #include <vm/vm_extern.h>
91 #include <vm/pmap.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_object.h>
95 
96 #include "fuse.h"
97 #include "fuse_file.h"
98 #include "fuse_node.h"
99 #include "fuse_internal.h"
100 #include "fuse_ipc.h"
101 #include "fuse_io.h"
102 
103 /*
104  * Set in a struct buf to indicate that the write came from the buffer cache
105  * and the originating cred and pid are no longer known.
106  */
107 #define B_FUSEFS_WRITE_CACHE B_FS_FLAG1
108 
109 SDT_PROVIDER_DECLARE(fusefs);
110 /*
111  * Fuse trace probe:
112  * arg0: verbosity.  Higher numbers give more verbose messages
113  * arg1: Textual message
114  */
115 SDT_PROBE_DEFINE2(fusefs, , io, trace, "int", "char*");
116 
117 static void
118 fuse_io_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
119 	struct thread *td);
120 static int
121 fuse_read_directbackend(struct vnode *vp, struct uio *uio,
122     struct ucred *cred, struct fuse_filehandle *fufh);
123 static int
124 fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag,
125     struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid);
126 static int
127 fuse_write_directbackend(struct vnode *vp, struct uio *uio,
128     struct ucred *cred, struct fuse_filehandle *fufh, off_t filesize,
129     int ioflag, bool pages);
130 static int
131 fuse_write_biobackend(struct vnode *vp, struct uio *uio,
132     struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid);
133 
134 /*
135  * FreeBSD clears the SUID and SGID bits on any write by a non-root user.
136  */
137 static void
138 fuse_io_clear_suid_on_write(struct vnode *vp, struct ucred *cred,
139 	struct thread *td)
140 {
141 	struct fuse_data *data;
142 	struct mount *mp;
143 	struct vattr va;
144 	int dataflags;
145 
146 	mp = vnode_mount(vp);
147 	data = fuse_get_mpdata(mp);
148 	dataflags = data->dataflags;
149 
150 	if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
151 		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID)) {
152 			fuse_internal_getattr(vp, &va, cred, td);
153 			if (va.va_mode & (S_ISUID | S_ISGID)) {
154 				mode_t mode = va.va_mode & ~(S_ISUID | S_ISGID);
155 				/* Clear all vattr fields except mode */
156 				vattr_null(&va);
157 				va.va_mode = mode;
158 
159 				/*
160 				 * Ignore fuse_internal_setattr's return value,
161 				 * because at this point the write operation has
162 				 * already succeeded and we don't want to return
163 				 * failing status for that.
164 				 */
165 				(void)fuse_internal_setattr(vp, &va, td, NULL);
166 			}
167 		}
168 	}
169 }
170 
171 SDT_PROBE_DEFINE5(fusefs, , io, io_dispatch, "struct vnode*", "struct uio*",
172 		"int", "struct ucred*", "struct fuse_filehandle*");
173 SDT_PROBE_DEFINE4(fusefs, , io, io_dispatch_filehandles_closed, "struct vnode*",
174     "struct uio*", "int", "struct ucred*");
175 int
176 fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
177     struct ucred *cred, pid_t pid)
178 {
179 	struct fuse_filehandle *fufh;
180 	int err, directio;
181 	int fflag;
182 	bool closefufh = false;
183 
184 	MPASS(vp->v_type == VREG || vp->v_type == VDIR);
185 
186 	fflag = (uio->uio_rw == UIO_READ) ? FREAD : FWRITE;
187 	err = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
188 	if (err == EBADF && vnode_mount(vp)->mnt_flag & MNT_EXPORTED) {
189 		/*
190 		 * nfsd will do I/O without first doing VOP_OPEN.  We
191 		 * must implicitly open the file here
192 		 */
193 		err = fuse_filehandle_open(vp, fflag, &fufh, curthread, cred);
194 		closefufh = true;
195 	}
196 	else if (err) {
197 		SDT_PROBE4(fusefs, , io, io_dispatch_filehandles_closed,
198 			vp, uio, ioflag, cred);
199 		printf("FUSE: io dispatch: filehandles are closed\n");
200 		return err;
201 	}
202 	if (err)
203 		goto out;
204 	SDT_PROBE5(fusefs, , io, io_dispatch, vp, uio, ioflag, cred, fufh);
205 
206 	/*
207          * Ideally, when the daemon asks for direct io at open time, the
208          * standard file flag should be set according to this, so that would
209          * just change the default mode, which later on could be changed via
210          * fcntl(2).
211          * But this doesn't work, the O_DIRECT flag gets cleared at some point
212          * (don't know where). So to make any use of the Fuse direct_io option,
213          * we hardwire it into the file's private data (similarly to Linux,
214          * btw.).
215          */
216 	directio = (ioflag & IO_DIRECT) || !fsess_opt_datacache(vnode_mount(vp));
217 
218 	switch (uio->uio_rw) {
219 	case UIO_READ:
220 		if (directio) {
221 			SDT_PROBE2(fusefs, , io, trace, 1,
222 				"direct read of vnode");
223 			err = fuse_read_directbackend(vp, uio, cred, fufh);
224 		} else {
225 			SDT_PROBE2(fusefs, , io, trace, 1,
226 				"buffered read of vnode");
227 			err = fuse_read_biobackend(vp, uio, ioflag, cred, fufh,
228 				pid);
229 		}
230 		break;
231 	case UIO_WRITE:
232 		fuse_vnode_update(vp, FN_MTIMECHANGE | FN_CTIMECHANGE);
233 		if (directio) {
234 			const int iosize = fuse_iosize(vp);
235 			off_t start, end, filesize;
236 
237 			SDT_PROBE2(fusefs, , io, trace, 1,
238 				"direct write of vnode");
239 
240 			err = fuse_vnode_size(vp, &filesize, cred, curthread);
241 			if (err)
242 				goto out;
243 
244 			start = uio->uio_offset;
245 			end = start + uio->uio_resid;
246 			KASSERT((ioflag & (IO_VMIO | IO_DIRECT)) !=
247 				(IO_VMIO | IO_DIRECT),
248 			    ("IO_DIRECT used for a cache flush?"));
249 			/* Invalidate the write cache when writing directly */
250 			v_inval_buf_range(vp, start, end, iosize);
251 			err = fuse_write_directbackend(vp, uio, cred, fufh,
252 				filesize, ioflag, false);
253 		} else {
254 			SDT_PROBE2(fusefs, , io, trace, 1,
255 				"buffered write of vnode");
256 			if (!fsess_opt_writeback(vnode_mount(vp)))
257 				ioflag |= IO_SYNC;
258 			err = fuse_write_biobackend(vp, uio, cred, fufh, ioflag,
259 				pid);
260 		}
261 		fuse_io_clear_suid_on_write(vp, cred, uio->uio_td);
262 		break;
263 	default:
264 		panic("uninterpreted mode passed to fuse_io_dispatch");
265 	}
266 
267 out:
268 	if (closefufh)
269 		fuse_filehandle_close(vp, fufh, curthread, cred);
270 
271 	return (err);
272 }
273 
274 SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_start, "int", "int", "int", "int");
275 SDT_PROBE_DEFINE2(fusefs, , io, read_bio_backend_feed, "int", "struct buf*");
276 SDT_PROBE_DEFINE4(fusefs, , io, read_bio_backend_end, "int", "ssize_t", "int",
277 		"struct buf*");
278 static int
279 fuse_read_biobackend(struct vnode *vp, struct uio *uio, int ioflag,
280     struct ucred *cred, struct fuse_filehandle *fufh, pid_t pid)
281 {
282 	struct buf *bp;
283 	struct mount *mp;
284 	struct fuse_data *data;
285 	daddr_t lbn, nextlbn;
286 	int bcount, nextsize;
287 	int err, n = 0, on = 0, seqcount;
288 	off_t filesize;
289 
290 	const int biosize = fuse_iosize(vp);
291 	mp = vnode_mount(vp);
292 	data = fuse_get_mpdata(mp);
293 
294 	if (uio->uio_offset < 0)
295 		return (EINVAL);
296 
297 	seqcount = ioflag >> IO_SEQSHIFT;
298 
299 	err = fuse_vnode_size(vp, &filesize, cred, curthread);
300 	if (err)
301 		return err;
302 
303 	for (err = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
304 		if (fuse_isdeadfs(vp)) {
305 			err = ENXIO;
306 			break;
307 		}
308 		if (filesize - uio->uio_offset <= 0)
309 			break;
310 		lbn = uio->uio_offset / biosize;
311 		on = uio->uio_offset & (biosize - 1);
312 
313 		if ((off_t)lbn * biosize >= filesize) {
314 			bcount = 0;
315 		} else if ((off_t)(lbn + 1) * biosize > filesize) {
316 			bcount = filesize - (off_t)lbn *biosize;
317 		} else {
318 			bcount = biosize;
319 		}
320 		nextlbn = lbn + 1;
321 		nextsize = MIN(biosize, filesize - nextlbn * biosize);
322 
323 		SDT_PROBE4(fusefs, , io, read_bio_backend_start,
324 			biosize, (int)lbn, on, bcount);
325 
326 		if (bcount < biosize) {
327 			/* If near EOF, don't do readahead */
328 			err = bread(vp, lbn, bcount, NOCRED, &bp);
329 		} else if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
330 			/* Try clustered read */
331 			long totread = uio->uio_resid + on;
332 			seqcount = MIN(seqcount,
333 				data->max_readahead_blocks + 1);
334 			err = cluster_read(vp, filesize, lbn, bcount, NOCRED,
335 				totread, seqcount, 0, &bp);
336 		} else if (seqcount > 1 && data->max_readahead_blocks >= 1) {
337 			/* Try non-clustered readahead */
338 			err = breadn(vp, lbn, bcount, &nextlbn, &nextsize, 1,
339 				NOCRED, &bp);
340 		} else {
341 			/* Just read what was requested */
342 			err = bread(vp, lbn, bcount, NOCRED, &bp);
343 		}
344 
345 		if (err) {
346 			brelse(bp);
347 			bp = NULL;
348 			break;
349 		}
350 
351 		/*
352 	         * on is the offset into the current bp.  Figure out how many
353 	         * bytes we can copy out of the bp.  Note that bcount is
354 	         * NOT DEV_BSIZE aligned.
355 	         *
356 	         * Then figure out how many bytes we can copy into the uio.
357 	         */
358 
359 		n = 0;
360 		if (on < bcount - bp->b_resid)
361 			n = MIN((unsigned)(bcount - bp->b_resid - on),
362 			    uio->uio_resid);
363 		if (n > 0) {
364 			SDT_PROBE2(fusefs, , io, read_bio_backend_feed, n, bp);
365 			err = uiomove(bp->b_data + on, n, uio);
366 		}
367 		vfs_bio_brelse(bp, ioflag);
368 		SDT_PROBE4(fusefs, , io, read_bio_backend_end, err,
369 			uio->uio_resid, n, bp);
370 		if (bp->b_resid > 0) {
371 			/* Short read indicates EOF */
372 			break;
373 		}
374 	}
375 
376 	return (err);
377 }
378 
379 SDT_PROBE_DEFINE1(fusefs, , io, read_directbackend_start,
380 	"struct fuse_read_in*");
381 SDT_PROBE_DEFINE3(fusefs, , io, read_directbackend_complete,
382 	"struct fuse_dispatcher*", "struct fuse_read_in*", "struct uio*");
383 
384 static int
385 fuse_read_directbackend(struct vnode *vp, struct uio *uio,
386     struct ucred *cred, struct fuse_filehandle *fufh)
387 {
388 	struct fuse_data *data;
389 	struct fuse_dispatcher fdi;
390 	struct fuse_read_in *fri;
391 	int err = 0;
392 
393 	data = fuse_get_mpdata(vp->v_mount);
394 
395 	if (uio->uio_resid == 0)
396 		return (0);
397 
398 	fdisp_init(&fdi, 0);
399 
400 	/*
401          * XXX In "normal" case we use an intermediate kernel buffer for
402          * transmitting data from daemon's context to ours. Eventually, we should
403          * get rid of this. Anyway, if the target uio lives in sysspace (we are
404          * called from pageops), and the input data doesn't need kernel-side
405          * processing (we are not called from readdir) we can already invoke
406          * an optimized, "peer-to-peer" I/O routine.
407          */
408 	while (uio->uio_resid > 0) {
409 		fdi.iosize = sizeof(*fri);
410 		fdisp_make_vp(&fdi, FUSE_READ, vp, uio->uio_td, cred);
411 		fri = fdi.indata;
412 		fri->fh = fufh->fh_id;
413 		fri->offset = uio->uio_offset;
414 		fri->size = MIN(uio->uio_resid,
415 		    fuse_get_mpdata(vp->v_mount)->max_read);
416 		if (fuse_libabi_geq(data, 7, 9)) {
417 			/* See comment regarding FUSE_WRITE_LOCKOWNER */
418 			fri->read_flags = 0;
419 			fri->flags = fufh_type_2_fflags(fufh->fufh_type);
420 		}
421 
422 		SDT_PROBE1(fusefs, , io, read_directbackend_start, fri);
423 
424 		if ((err = fdisp_wait_answ(&fdi)))
425 			goto out;
426 
427 		SDT_PROBE3(fusefs, , io, read_directbackend_complete,
428 			&fdi, fri, uio);
429 
430 		if ((err = uiomove(fdi.answ, MIN(fri->size, fdi.iosize), uio)))
431 			break;
432 		if (fdi.iosize < fri->size) {
433 			/*
434 			 * Short read.  Should only happen at EOF or with
435 			 * direct io.
436 			 */
437 			break;
438 		}
439 	}
440 
441 out:
442 	fdisp_destroy(&fdi);
443 	return (err);
444 }
445 
446 static int
447 fuse_write_directbackend(struct vnode *vp, struct uio *uio,
448     struct ucred *cred, struct fuse_filehandle *fufh, off_t filesize,
449     int ioflag, bool pages)
450 {
451 	struct fuse_vnode_data *fvdat = VTOFUD(vp);
452 	struct fuse_data *data;
453 	struct fuse_write_in *fwi;
454 	struct fuse_write_out *fwo;
455 	struct fuse_dispatcher fdi;
456 	size_t chunksize;
457 	void *fwi_data;
458 	off_t as_written_offset;
459 	int diff;
460 	int err = 0;
461 	bool direct_io = fufh->fuse_open_flags & FOPEN_DIRECT_IO;
462 	bool wrote_anything = false;
463 	uint32_t write_flags;
464 
465 	data = fuse_get_mpdata(vp->v_mount);
466 
467 	/*
468 	 * Don't set FUSE_WRITE_LOCKOWNER in write_flags.  It can't be set
469 	 * accurately when using POSIX AIO, libfuse doesn't use it, and I'm not
470 	 * aware of any file systems that do.  It was an attempt to add
471 	 * Linux-style mandatory locking to the FUSE protocol, but mandatory
472 	 * locking is deprecated even on Linux.  See Linux commit
473 	 * f33321141b273d60cbb3a8f56a5489baad82ba5e .
474 	 */
475 	/*
476 	 * Set FUSE_WRITE_CACHE whenever we don't know the uid, gid, and/or pid
477 	 * that originated a write.  For example when writing from the
478 	 * writeback cache.  I don't know of a single file system that cares,
479 	 * but the protocol says we're supposed to do this.
480 	 */
481 	write_flags = !pages && (
482 		(ioflag & IO_DIRECT) ||
483 		!fsess_opt_datacache(vnode_mount(vp)) ||
484 		!fsess_opt_writeback(vnode_mount(vp))) ? 0 : FUSE_WRITE_CACHE;
485 
486 	if (uio->uio_resid == 0)
487 		return (0);
488 
489 	if (ioflag & IO_APPEND)
490 		uio_setoffset(uio, filesize);
491 
492 	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
493 		return (EFBIG);
494 
495 	fdisp_init(&fdi, 0);
496 
497 	while (uio->uio_resid > 0) {
498 		chunksize = MIN(uio->uio_resid, data->max_write);
499 
500 		fdi.iosize = sizeof(*fwi) + chunksize;
501 		fdisp_make_vp(&fdi, FUSE_WRITE, vp, uio->uio_td, cred);
502 
503 		fwi = fdi.indata;
504 		fwi->fh = fufh->fh_id;
505 		fwi->offset = uio->uio_offset;
506 		fwi->size = chunksize;
507 		fwi->write_flags = write_flags;
508 		if (fuse_libabi_geq(data, 7, 9)) {
509 			fwi->flags = fufh_type_2_fflags(fufh->fufh_type);
510 			fwi_data = (char *)fdi.indata + sizeof(*fwi);
511 		} else {
512 			fwi_data = (char *)fdi.indata +
513 				FUSE_COMPAT_WRITE_IN_SIZE;
514 		}
515 
516 		if ((err = uiomove(fwi_data, chunksize, uio)))
517 			break;
518 
519 retry:
520 		err = fdisp_wait_answ(&fdi);
521 		if (err == ERESTART || err == EINTR || err == EWOULDBLOCK) {
522 			/*
523 			 * Rewind the uio so dofilewrite will know it's
524 			 * incomplete
525 			 */
526 			uio->uio_resid += fwi->size;
527 			uio->uio_offset -= fwi->size;
528 			/*
529 			 * Change ERESTART into EINTR because we can't rewind
530 			 * uio->uio_iov.  Basically, once uiomove(9) has been
531 			 * called, it's impossible to restart a syscall.
532 			 */
533 			if (err == ERESTART)
534 				err = EINTR;
535 			break;
536 		} else if (err) {
537 			break;
538 		} else {
539 			wrote_anything = true;
540 		}
541 
542 		fwo = ((struct fuse_write_out *)fdi.answ);
543 
544 		/* Adjust the uio in the case of short writes */
545 		diff = fwi->size - fwo->size;
546 		as_written_offset = uio->uio_offset - diff;
547 
548 		if (as_written_offset - diff > filesize)
549 			fuse_vnode_setsize(vp, as_written_offset);
550 		if (as_written_offset - diff >= filesize)
551 			fvdat->flag &= ~FN_SIZECHANGE;
552 
553 		if (diff < 0) {
554 			printf("WARNING: misbehaving FUSE filesystem "
555 				"wrote more data than we provided it\n");
556 			err = EINVAL;
557 			break;
558 		} else if (diff > 0) {
559 			/* Short write */
560 			if (!direct_io) {
561 				printf("WARNING: misbehaving FUSE filesystem: "
562 					"short writes are only allowed with "
563 					"direct_io\n");
564 			}
565 			if (ioflag & IO_DIRECT) {
566 				/* Return early */
567 				uio->uio_resid += diff;
568 				uio->uio_offset -= diff;
569 				break;
570 			} else {
571 				/* Resend the unwritten portion of data */
572 				fdi.iosize = sizeof(*fwi) + diff;
573 				/* Refresh fdi without clearing data buffer */
574 				fdisp_refresh_vp(&fdi, FUSE_WRITE, vp,
575 					uio->uio_td, cred);
576 				fwi = fdi.indata;
577 				MPASS2(fwi == fdi.indata, "FUSE dispatcher "
578 					"reallocated despite no increase in "
579 					"size?");
580 				void *src = (char*)fwi_data + fwo->size;
581 				memmove(fwi_data, src, diff);
582 				fwi->fh = fufh->fh_id;
583 				fwi->offset = as_written_offset;
584 				fwi->size = diff;
585 				fwi->write_flags = write_flags;
586 				goto retry;
587 			}
588 		}
589 	}
590 
591 	fdisp_destroy(&fdi);
592 
593 	if (wrote_anything)
594 		fuse_vnode_undirty_cached_timestamps(vp);
595 
596 	return (err);
597 }
598 
599 SDT_PROBE_DEFINE6(fusefs, , io, write_biobackend_start, "int64_t", "int", "int",
600 		"struct uio*", "int", "bool");
601 SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_append_race, "long", "int");
602 SDT_PROBE_DEFINE2(fusefs, , io, write_biobackend_issue, "int", "struct buf*");
603 
604 static int
605 fuse_write_biobackend(struct vnode *vp, struct uio *uio,
606     struct ucred *cred, struct fuse_filehandle *fufh, int ioflag, pid_t pid)
607 {
608 	struct fuse_vnode_data *fvdat = VTOFUD(vp);
609 	struct buf *bp;
610 	daddr_t lbn;
611 	off_t filesize;
612 	int bcount;
613 	int n, on, seqcount, err = 0;
614 	bool last_page;
615 
616 	const int biosize = fuse_iosize(vp);
617 
618 	seqcount = ioflag >> IO_SEQSHIFT;
619 
620 	KASSERT(uio->uio_rw == UIO_WRITE, ("fuse_write_biobackend mode"));
621 	if (vp->v_type != VREG)
622 		return (EIO);
623 	if (uio->uio_offset < 0)
624 		return (EINVAL);
625 	if (uio->uio_resid == 0)
626 		return (0);
627 
628 	err = fuse_vnode_size(vp, &filesize, cred, curthread);
629 	if (err)
630 		return err;
631 
632 	if (ioflag & IO_APPEND)
633 		uio_setoffset(uio, filesize);
634 
635 	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
636 		return (EFBIG);
637 
638 	do {
639 		bool direct_append, extending;
640 
641 		if (fuse_isdeadfs(vp)) {
642 			err = ENXIO;
643 			break;
644 		}
645 		lbn = uio->uio_offset / biosize;
646 		on = uio->uio_offset & (biosize - 1);
647 		n = MIN((unsigned)(biosize - on), uio->uio_resid);
648 
649 again:
650 		/* Get or create a buffer for the write */
651 		direct_append = uio->uio_offset == filesize && n;
652 		if (uio->uio_offset + n < filesize) {
653 			extending = false;
654 			if ((off_t)(lbn + 1) * biosize < filesize) {
655 				/* Not the file's last block */
656 				bcount = biosize;
657 			} else {
658 				/* The file's last block */
659 				bcount = filesize - (off_t)lbn * biosize;
660 			}
661 		} else {
662 			extending = true;
663 			bcount = on + n;
664 		}
665 		if (howmany(((off_t)lbn * biosize + on + n - 1), PAGE_SIZE) >=
666 		    howmany(filesize, PAGE_SIZE))
667 			last_page = true;
668 		else
669 			last_page = false;
670 		if (direct_append) {
671 			/*
672 			 * Take care to preserve the buffer's B_CACHE state so
673 			 * as not to cause an unnecessary read.
674 			 */
675 			bp = getblk(vp, lbn, on, PCATCH, 0, 0);
676 			if (bp != NULL) {
677 				uint32_t save = bp->b_flags & B_CACHE;
678 				allocbuf(bp, bcount);
679 				bp->b_flags |= save;
680 			}
681 		} else {
682 			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
683 		}
684 		if (!bp) {
685 			err = EINTR;
686 			break;
687 		}
688 		if (extending) {
689 			/*
690 			 * Extend file _after_ locking buffer so we won't race
691 			 * with other readers
692 			 */
693 			err = fuse_vnode_setsize(vp, uio->uio_offset + n);
694 			filesize = uio->uio_offset + n;
695 			fvdat->flag |= FN_SIZECHANGE;
696 			if (err) {
697 				brelse(bp);
698 				break;
699 			}
700 		}
701 
702 		SDT_PROBE6(fusefs, , io, write_biobackend_start,
703 			lbn, on, n, uio, bcount, direct_append);
704 		/*
705 	         * Issue a READ if B_CACHE is not set.  In special-append
706 	         * mode, B_CACHE is based on the buffer prior to the write
707 	         * op and is typically set, avoiding the read.  If a read
708 	         * is required in special append mode, the server will
709 	         * probably send us a short-read since we extended the file
710 	         * on our end, resulting in b_resid == 0 and, thusly,
711 	         * B_CACHE getting set.
712 	         *
713 	         * We can also avoid issuing the read if the write covers
714 	         * the entire buffer.  We have to make sure the buffer state
715 	         * is reasonable in this case since we will not be initiating
716 	         * I/O.  See the comments in kern/vfs_bio.c's getblk() for
717 	         * more information.
718 	         *
719 	         * B_CACHE may also be set due to the buffer being cached
720 	         * normally.
721 	         */
722 
723 		if (on == 0 && n == bcount) {
724 			bp->b_flags |= B_CACHE;
725 			bp->b_flags &= ~B_INVAL;
726 			bp->b_ioflags &= ~BIO_ERROR;
727 		}
728 		if ((bp->b_flags & B_CACHE) == 0) {
729 			bp->b_iocmd = BIO_READ;
730 			vfs_busy_pages(bp, 0);
731 			fuse_io_strategy(vp, bp);
732 			if ((err = bp->b_error)) {
733 				brelse(bp);
734 				break;
735 			}
736 			if (bp->b_resid > 0) {
737 				/*
738 				 * Short read indicates EOF.  Update file size
739 				 * from the server and try again.
740 				 */
741 				SDT_PROBE2(fusefs, , io, trace, 1,
742 					"Short read during a RMW");
743 				brelse(bp);
744 				err = fuse_vnode_size(vp, &filesize, cred,
745 				    curthread);
746 				if (err)
747 					break;
748 				else
749 					goto again;
750 			}
751 		}
752 		if (bp->b_wcred == NOCRED)
753 			bp->b_wcred = crhold(cred);
754 
755 		/*
756 	         * If dirtyend exceeds file size, chop it down.  This should
757 	         * not normally occur but there is an append race where it
758 	         * might occur XXX, so we log it.
759 	         *
760 	         * If the chopping creates a reverse-indexed or degenerate
761 	         * situation with dirtyoff/end, we 0 both of them.
762 	         */
763 		if (bp->b_dirtyend > bcount) {
764 			SDT_PROBE2(fusefs, , io, write_biobackend_append_race,
765 			    (long)bp->b_blkno * biosize,
766 			    bp->b_dirtyend - bcount);
767 			bp->b_dirtyend = bcount;
768 		}
769 		if (bp->b_dirtyoff >= bp->b_dirtyend)
770 			bp->b_dirtyoff = bp->b_dirtyend = 0;
771 
772 		/*
773 	         * If the new write will leave a contiguous dirty
774 	         * area, just update the b_dirtyoff and b_dirtyend,
775 	         * otherwise force a write rpc of the old dirty area.
776 	         *
777 	         * While it is possible to merge discontiguous writes due to
778 	         * our having a B_CACHE buffer ( and thus valid read data
779 	         * for the hole), we don't because it could lead to
780 	         * significant cache coherency problems with multiple clients,
781 	         * especially if locking is implemented later on.
782 	         *
783 	         * as an optimization we could theoretically maintain
784 	         * a linked list of discontinuous areas, but we would still
785 	         * have to commit them separately so there isn't much
786 	         * advantage to it except perhaps a bit of asynchronization.
787 	         */
788 
789 		if (bp->b_dirtyend > 0 &&
790 		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
791 			/*
792 	                 * Yes, we mean it. Write out everything to "storage"
793 	                 * immediately, without hesitation. (Apart from other
794 	                 * reasons: the only way to know if a write is valid
795 	                 * if its actually written out.)
796 	                 */
797 			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 0, bp);
798 			bwrite(bp);
799 			if (bp->b_error == EINTR) {
800 				err = EINTR;
801 				break;
802 			}
803 			goto again;
804 		}
805 		err = uiomove((char *)bp->b_data + on, n, uio);
806 
807 		if (err) {
808 			bp->b_ioflags |= BIO_ERROR;
809 			bp->b_error = err;
810 			brelse(bp);
811 			break;
812 			/* TODO: vfs_bio_clrbuf like ffs_write does? */
813 		}
814 		/*
815 	         * Only update dirtyoff/dirtyend if not a degenerate
816 	         * condition.
817 	         */
818 		if (n) {
819 			if (bp->b_dirtyend > 0) {
820 				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
821 				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
822 			} else {
823 				bp->b_dirtyoff = on;
824 				bp->b_dirtyend = on + n;
825 			}
826 			vfs_bio_set_valid(bp, on, n);
827 		}
828 
829 		vfs_bio_set_flags(bp, ioflag);
830 
831 		bp->b_flags |= B_FUSEFS_WRITE_CACHE;
832 		if (ioflag & IO_SYNC) {
833 			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 2, bp);
834 			if (!(ioflag & IO_VMIO))
835 				bp->b_flags &= ~B_FUSEFS_WRITE_CACHE;
836 			err = bwrite(bp);
837 		} else if (vm_page_count_severe() ||
838 			    buf_dirty_count_severe() ||
839 			    (ioflag & IO_ASYNC)) {
840 			bp->b_flags |= B_CLUSTEROK;
841 			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 3, bp);
842 			bawrite(bp);
843 		} else if (on == 0 && n == bcount) {
844 			if ((vp->v_mount->mnt_flag & MNT_NOCLUSTERW) == 0) {
845 				bp->b_flags |= B_CLUSTEROK;
846 				SDT_PROBE2(fusefs, , io, write_biobackend_issue,
847 					4, bp);
848 				cluster_write(vp, bp, filesize, seqcount, 0);
849 			} else {
850 				SDT_PROBE2(fusefs, , io, write_biobackend_issue,
851 					5, bp);
852 				bawrite(bp);
853 			}
854 		} else if (ioflag & IO_DIRECT) {
855 			bp->b_flags |= B_CLUSTEROK;
856 			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 6, bp);
857 			bawrite(bp);
858 		} else {
859 			bp->b_flags &= ~B_CLUSTEROK;
860 			SDT_PROBE2(fusefs, , io, write_biobackend_issue, 7, bp);
861 			bdwrite(bp);
862 		}
863 		if (err)
864 			break;
865 	} while (uio->uio_resid > 0 && n > 0);
866 
867 	return (err);
868 }
869 
870 int
871 fuse_io_strategy(struct vnode *vp, struct buf *bp)
872 {
873 	struct fuse_vnode_data *fvdat = VTOFUD(vp);
874 	struct fuse_filehandle *fufh;
875 	struct ucred *cred;
876 	struct uio *uiop;
877 	struct uio uio;
878 	struct iovec io;
879 	off_t filesize;
880 	int error = 0;
881 	int fflag;
882 	/* We don't know the true pid when we're dealing with the cache */
883 	pid_t pid = 0;
884 
885 	const int biosize = fuse_iosize(vp);
886 
887 	MPASS(vp->v_type == VREG || vp->v_type == VDIR);
888 	MPASS(bp->b_iocmd == BIO_READ || bp->b_iocmd == BIO_WRITE);
889 
890 	fflag = bp->b_iocmd == BIO_READ ? FREAD : FWRITE;
891 	cred = bp->b_iocmd == BIO_READ ? bp->b_rcred : bp->b_wcred;
892 	error = fuse_filehandle_getrw(vp, fflag, &fufh, cred, pid);
893 	if (bp->b_iocmd == BIO_READ && error == EBADF) {
894 		/*
895 		 * This may be a read-modify-write operation on a cached file
896 		 * opened O_WRONLY.  The FUSE protocol allows this.
897 		 */
898 		error = fuse_filehandle_get(vp, FWRITE, &fufh, cred, pid);
899 	}
900 	if (error) {
901 		printf("FUSE: strategy: filehandles are closed\n");
902 		bp->b_ioflags |= BIO_ERROR;
903 		bp->b_error = error;
904 		bufdone(bp);
905 		return (error);
906 	}
907 
908 	uiop = &uio;
909 	uiop->uio_iov = &io;
910 	uiop->uio_iovcnt = 1;
911 	uiop->uio_segflg = UIO_SYSSPACE;
912 	uiop->uio_td = curthread;
913 
914 	/*
915          * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
916          * do this here so we do not have to do it in all the code that
917          * calls us.
918          */
919 	bp->b_flags &= ~B_INVAL;
920 	bp->b_ioflags &= ~BIO_ERROR;
921 
922 	KASSERT(!(bp->b_flags & B_DONE),
923 	    ("fuse_io_strategy: bp %p already marked done", bp));
924 	if (bp->b_iocmd == BIO_READ) {
925 		ssize_t left;
926 
927 		io.iov_len = uiop->uio_resid = bp->b_bcount;
928 		io.iov_base = bp->b_data;
929 		uiop->uio_rw = UIO_READ;
930 
931 		uiop->uio_offset = ((off_t)bp->b_lblkno) * biosize;
932 		error = fuse_read_directbackend(vp, uiop, cred, fufh);
933 		/*
934 		 * Store the amount we failed to read in the buffer's private
935 		 * field, so callers can truncate the file if necessary'
936 		 */
937 
938 		if (!error && uiop->uio_resid) {
939 			int nread = bp->b_bcount - uiop->uio_resid;
940 			left = uiop->uio_resid;
941 			bzero((char *)bp->b_data + nread, left);
942 
943 			if ((fvdat->flag & FN_SIZECHANGE) == 0) {
944 				/*
945 				 * A short read with no error, when not using
946 				 * direct io, and when no writes are cached,
947 				 * indicates EOF caused by a server-side
948 				 * truncation.  Clear the attr cache so we'll
949 				 * pick up the new file size and timestamps.
950 				 *
951 				 * We must still bzero the remaining buffer so
952 				 * uninitialized data doesn't get exposed by a
953 				 * future truncate that extends the file.
954 				 *
955 				 * To prevent lock order problems, we must
956 				 * truncate the file upstack, not here.
957 				 */
958 				SDT_PROBE2(fusefs, , io, trace, 1,
959 					"Short read of a clean file");
960 				fuse_vnode_clear_attr_cache(vp);
961 			} else {
962 				/*
963 				 * If dirty writes _are_ cached beyond EOF,
964 				 * that indicates a newly created hole that the
965 				 * server doesn't know about.  Those don't pose
966 				 * any problem.
967 				 * XXX: we don't currently track whether dirty
968 				 * writes are cached beyond EOF, before EOF, or
969 				 * both.
970 				 */
971 				SDT_PROBE2(fusefs, , io, trace, 1,
972 					"Short read of a dirty file");
973 				uiop->uio_resid = 0;
974 			}
975 
976 		}
977 		if (error) {
978 			bp->b_ioflags |= BIO_ERROR;
979 			bp->b_error = error;
980 		}
981 	} else {
982 		/*
983 	         * Setup for actual write
984 	         */
985 		error = fuse_vnode_size(vp, &filesize, cred, curthread);
986 		if (error) {
987 			bp->b_ioflags |= BIO_ERROR;
988 			bp->b_error = error;
989 			bufdone(bp);
990 			return (error);
991 		}
992 
993 		if ((off_t)bp->b_lblkno * biosize + bp->b_dirtyend > filesize)
994 			bp->b_dirtyend = filesize -
995 				(off_t)bp->b_lblkno * biosize;
996 
997 		if (bp->b_dirtyend > bp->b_dirtyoff) {
998 			io.iov_len = uiop->uio_resid = bp->b_dirtyend
999 			    - bp->b_dirtyoff;
1000 			uiop->uio_offset = (off_t)bp->b_lblkno * biosize
1001 			    + bp->b_dirtyoff;
1002 			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1003 			uiop->uio_rw = UIO_WRITE;
1004 
1005 			bool pages = bp->b_flags & B_FUSEFS_WRITE_CACHE;
1006 			error = fuse_write_directbackend(vp, uiop, cred, fufh,
1007 				filesize, 0, pages);
1008 
1009 			if (error == EINTR || error == ETIMEDOUT) {
1010 				bp->b_flags &= ~(B_INVAL | B_NOCACHE);
1011 				if ((bp->b_flags & B_PAGING) == 0) {
1012 					bdirty(bp);
1013 					bp->b_flags &= ~B_DONE;
1014 				}
1015 				if ((error == EINTR || error == ETIMEDOUT) &&
1016 				    (bp->b_flags & B_ASYNC) == 0)
1017 					bp->b_flags |= B_EINTR;
1018 			} else {
1019 				if (error) {
1020 					bp->b_ioflags |= BIO_ERROR;
1021 					bp->b_flags |= B_INVAL;
1022 					bp->b_error = error;
1023 				}
1024 				bp->b_dirtyoff = bp->b_dirtyend = 0;
1025 			}
1026 		} else {
1027 			bp->b_resid = 0;
1028 			bufdone(bp);
1029 			return (0);
1030 		}
1031 	}
1032 	bp->b_resid = uiop->uio_resid;
1033 	bufdone(bp);
1034 	return (error);
1035 }
1036 
1037 int
1038 fuse_io_flushbuf(struct vnode *vp, int waitfor, struct thread *td)
1039 {
1040 
1041 	return (vn_fsync_buf(vp, waitfor));
1042 }
1043 
1044 /*
1045  * Flush and invalidate all dirty buffers. If another process is already
1046  * doing the flush, just wait for completion.
1047  */
1048 int
1049 fuse_io_invalbuf(struct vnode *vp, struct thread *td)
1050 {
1051 	struct fuse_vnode_data *fvdat = VTOFUD(vp);
1052 	int error = 0;
1053 
1054 	if (vp->v_iflag & VI_DOOMED)
1055 		return 0;
1056 
1057 	ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf");
1058 
1059 	while (fvdat->flag & FN_FLUSHINPROG) {
1060 		struct proc *p = td->td_proc;
1061 
1062 		if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF)
1063 			return EIO;
1064 		fvdat->flag |= FN_FLUSHWANT;
1065 		tsleep(&fvdat->flag, PRIBIO + 2, "fusevinv", 2 * hz);
1066 		error = 0;
1067 		if (p != NULL) {
1068 			PROC_LOCK(p);
1069 			if (SIGNOTEMPTY(p->p_siglist) ||
1070 			    SIGNOTEMPTY(td->td_siglist))
1071 				error = EINTR;
1072 			PROC_UNLOCK(p);
1073 		}
1074 		if (error == EINTR)
1075 			return EINTR;
1076 	}
1077 	fvdat->flag |= FN_FLUSHINPROG;
1078 
1079 	if (vp->v_bufobj.bo_object != NULL) {
1080 		VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
1081 		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1082 		VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
1083 	}
1084 	error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
1085 	while (error) {
1086 		if (error == ERESTART || error == EINTR) {
1087 			fvdat->flag &= ~FN_FLUSHINPROG;
1088 			if (fvdat->flag & FN_FLUSHWANT) {
1089 				fvdat->flag &= ~FN_FLUSHWANT;
1090 				wakeup(&fvdat->flag);
1091 			}
1092 			return EINTR;
1093 		}
1094 		error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
1095 	}
1096 	fvdat->flag &= ~FN_FLUSHINPROG;
1097 	if (fvdat->flag & FN_FLUSHWANT) {
1098 		fvdat->flag &= ~FN_FLUSHWANT;
1099 		wakeup(&fvdat->flag);
1100 	}
1101 	return (error);
1102 }
1103