// SPDX-License-Identifier: GPL-2.0-or-later
/* Unbuffered and direct write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/uio.h>
#include "internal.h"

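/*
 * Completion cleanup for a direct/unbuffered write.  If the write succeeded
 * and extended the file, advance i_size to cover the data written, using the
 * filesystem's ->update_i_size() hook if it supplies one.
 */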
static void netfs_cleanup_dio_write(struct netfs_io_request *wreq)
{
	struct inode *inode = wreq->inode;
	unsigned long long end = wreq->start + wreq->transferred;

	if (!wreq->error &&
	    i_size_read(inode) < end) {
		if (wreq->netfs_ops->update_i_size)
			wreq->netfs_ops->update_i_size(inode, end);
		else
			i_size_write(inode, end);
	}
}

/*
 * Perform an unbuffered write where we may have to do an RMW operation on an
 * encrypted file.  This can also be used for direct I/O writes.
 */
ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *iter,
					   struct netfs_group *netfs_group)
{
	struct netfs_io_request *wreq;
	unsigned long long start = iocb->ki_pos;
	unsigned long long end = start + iov_iter_count(iter);
	ssize_t ret, n;
	size_t len = iov_iter_count(iter);
	bool async = !is_sync_kiocb(iocb);

	_enter("");

	/* We're going to need a bounce buffer if what we transmit is going to
	 * be different in some way to the source buffer, e.g. because it gets
	 * encrypted/compressed or because it needs expanding to a block size.
	 */
	// TODO

	_debug("uw %llx-%llx", start, end);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
				      iocb->ki_flags & IOCB_DIRECT ?
				      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
	if (IS_ERR(wreq))
		return PTR_ERR(wreq);

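	/* Only the upload stream is used for an unbuffered write; there's no
	 * copy made to the local cache.
	 */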
	wreq->io_streams[0].avail = true;
	trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
				 netfs_write_trace_dio_write :
				 netfs_write_trace_unbuffered_write));

	{
		/* If this is an async op and we're not using a bounce buffer,
		 * we have to save the source buffer as the iterator is only
		 * good until we return.  In such a case, extract an iterator
		 * to represent as much of the output buffer as we can
		 * manage.  Note that the extraction might not be able to
		 * allocate a sufficiently large bvec array and may shorten the
		 * request.
		 */
		if (async || user_backed_iter(iter)) {
			n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
			if (n < 0) {
				ret = n;
				goto out;
			}
			wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
			wreq->direct_bv_count = n;
			wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
		} else {
			wreq->iter = *iter;
		}

		wreq->io_iter = wreq->iter;
	}

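	/* Make the I/O paths take their data from wreq->io_iter rather than
	 * from the pagecache.
	 */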
	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);

	/* Copy the data into the bounce buffer and encrypt it. */
	// TODO

	/* Dispatch the write. */
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	if (async)
		wreq->iocb = iocb;
	wreq->cleanup = netfs_cleanup_dio_write;
	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
	if (ret < 0) {
		_debug("begin = %zd", ret);
		goto out;
	}

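	/* For a synchronous kiocb, wait for the request to complete and pick
	 * up the result; an async write returns -EIOCBQUEUED and completion
	 * is signalled through iocb->ki_complete() instead.
	 */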
	if (!async) {
		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);
		smp_rmb(); /* Read error/transferred after RIP flag */
		ret = wreq->error;
		if (ret == 0) {
			ret = wreq->transferred;
			iocb->ki_pos += ret;
		}
	} else {
		ret = -EIOCBQUEUED;
	}

out:
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);

/**
 * netfs_unbuffered_write_iter - Unbuffered write to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Do an unbuffered write to a file, writing the data directly to the server
 * and not lodging the data in the pagecache.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;
	unsigned long long end = pos + iov_iter_count(from) - 1; /* Inclusive last byte */

	_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	trace_netfs_write_iter(iocb, from);
	netfs_stat(&netfs_n_wh_dio_write);

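	/* Exclude buffered I/O whilst the direct write is in progress. */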
	ret = netfs_start_io_direct(inode);
	if (ret < 0)
		return ret;
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;
	ret = file_remove_privs(file);
	if (ret < 0)
		goto out;
	ret = file_update_time(file);
	if (ret < 0)
		goto out;
	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* We could block if there are any pages in the range, so try
		 * to invalidate them and fail with -EAGAIN if that can't be
		 * done.
		 */
		ret = -EAGAIN;
		if (filemap_range_has_page(mapping, pos, end))
			if (filemap_invalidate_inode(inode, true, pos, end))
				goto out;
	} else {
		ret = filemap_write_and_wait_range(mapping, pos, end);
		if (ret < 0)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region
	 * we're about to write.  We do this *before* the write so that we can
	 * return without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	ret = filemap_invalidate_inode(inode, true, pos, end);
	if (ret < 0)
		goto out;
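	/* The write may extend the region that the server holds data for, so
	 * push out the point after which we assume there's no data on the
	 * server.
	 */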
	end = iocb->ki_pos + iov_iter_count(from);
	if (end > ictx->zero_point)
		ictx->zero_point = end;

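	/* Tell the local cache that a DIO write is taking place so that it
	 * can invalidate or stop caching the affected data.
	 */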
	fscache_invalidate(netfs_i_cookie(ictx), NULL, i_size_read(inode),
			   FSCACHE_INVAL_DIO_WRITE);
	ret = netfs_unbuffered_write_iter_locked(iocb, from, NULL);
out:
	netfs_end_io_direct(inode);
	return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter);