xref: /linux/fs/afs/write.c (revision 5fb70e7275a61dd404f684370e1add7fe0ebe9c5)
// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  We cached the keys used to author the
 * writes on the vnode.  *_wbk will contain the last writeback key used or NULL
 * and we need to start from there if it's set.
 */
static int afs_get_writeback_key(struct afs_vnode *vnode,
				 struct afs_wb_key **_wbk)
{
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	spin_lock(&vnode->wb_lock);
	if (*_wbk)
		p = (*_wbk)->vnode_link.next;
	else
		p = vnode->wb_keys.next;

	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0) {
			refcount_inc(&wbk->usage);
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}

		wbk = NULL;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	if (*_wbk)
		afs_put_wb_key(*_wbk);
	*_wbk = wbk;
	return wbk ? 0 : ret;
}
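
/*
 * Sketch of the intended calling pattern (inferred from the interface above,
 * not copied from any particular caller): start with *_wbk == NULL and, on a
 * permission-class failure, call again with the previous key still in *_wbk
 * so that the scan resumes from the next entry in vnode->wb_keys:
 *
 *	struct afs_wb_key *wbk = NULL;
 *	int ret = afs_get_writeback_key(vnode, &wbk);
 *
 * and then, when a store fails with -EACCES/-ENOKEY and friends:
 *
 *	ret = afs_get_writeback_key(vnode, &wbk);
 *
 * The function itself drops the ref on the key passed back in, so the caller
 * only needs a final afs_put_wb_key() on the last key it was given.
 */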

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};
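
/*
 * The operation table above lets the common operation core pick the wire
 * protocol at dispatch time: ->issue_afs_rpc is used when talking to a plain
 * AFS fileserver and ->issue_yfs_rpc when the server speaks the YFS variant,
 * with ->success run once a reply has been received and decoded.
 */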

/*
 * write to a file
 */
static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos)
{
	struct afs_operation *op;
	struct afs_wb_key *wbk = NULL;
	loff_t size = iov_iter_count(iter);
	int ret = -ENOKEY;

	_enter("%s{%llx:%llu.%u},%llx,%llx",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       size, pos);

	ret = afs_get_writeback_key(vnode, &wbk);
	if (ret) {
		_leave(" = %d [no keys]", ret);
		return ret;
	}

	op = afs_alloc_operation(wbk->key, vnode->volume);
	if (IS_ERR(op)) {
		afs_put_wb_key(wbk);
		return -ENOMEM;
	}

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = size;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

try_next_key:
	afs_begin_vnode_operation(op);

	op->store.write_iter = iter;
	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
	op->mtime = inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);

	switch (afs_op_error(op)) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");

		ret = afs_get_writeback_key(vnode, &wbk);
		if (ret == 0) {
			key_put(op->key);
			op->key = key_get(wbk->key);
			goto try_next_key;
		}
		break;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", afs_op_error(op));
	return afs_put_operation(op);
}
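
/*
 * Note on AFS_OPERATION_UNINTR above: the wait for the StoreData reply is
 * made non-interruptible, presumably so that a signal can't abandon an RPC
 * partway through a store and leave the pagecache and the server disagreeing
 * about the file's content.
 */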

static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
{
	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
	ssize_t ret;

	_enter("%x[%x],%zx",
	       subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	ret = afs_store_data(vnode, &subreq->io_iter, subreq->start);
	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
					  false);
}

static void afs_upload_to_server_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq =
		container_of(work, struct netfs_io_subrequest, work);

	afs_upload_to_server(subreq);
}

/*
 * Set up write requests for a writeback slice.  We need to add a write request
 * for each write we want to make.
 */
void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
{
	struct netfs_io_subrequest *subreq;

	_enter("%x,%llx-%llx", wreq->debug_id, start, start + len);

	subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
					    start, len, afs_upload_to_server_worker);
	if (subreq)
		netfs_queue_write_request(subreq);
}
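
/*
 * afs_create_write_requests() above is the entry point used by the older
 * netfs writeback scheme; afs_begin_writeback(), afs_prepare_write() and
 * afs_issue_write() below belong to the newer stream-based write helpers.
 * Which path is exercised depends on the netfs core this tree is paired with.
 */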

/*
 * Writeback calls this when it finds a folio that needs uploading.  This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
	wreq->io_streams[0].avail = true;
}
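
/*
 * Stream 0 is the upload-to-server stream; marking it available tells the
 * netfs core that this writeback pass must actually send data to the server
 * and not merely copy it to the local cache.
 */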

/*
 * Prepare a subrequest to write to the server.  This sets the max_len
 * parameter.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
	//	subreq->max_len = 512 * 1024;
	//else
	subreq->max_len = 256 * 1024 * 1024;
}
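
/*
 * The 256MiB figure above caps how much data a single write subrequest (and
 * hence a single store operation) will be asked to carry; the commented-out
 * branch suggests a much smaller 512KiB slice was considered for retries.
 */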

/*
 * Issue a subrequest to write to the server.
 */
static void afs_issue_write_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
	ssize_t ret;

	_enter("%x[%x],%zx",
	       subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);

#if 0 // Error injection
	if (subreq->debug_index == 3)
		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);

	if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
	}
#endif

	ret = afs_store_data(vnode, &subreq->io_iter, subreq->start);
	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}

void afs_issue_write(struct netfs_io_subrequest *subreq)
{
	subreq->work.func = afs_issue_write_worker;
	if (!queue_work(system_unbound_wq, &subreq->work))
		WARN_ON_ONCE(1);
}
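
/*
 * The subrequest is punted to system_unbound_wq because afs_store_data()
 * waits synchronously for the server's reply; an unbound worker keeps that
 * potentially long sleep off the writeback caller's stack.
 */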

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}
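
/*
 * file_write_and_wait_range() starts writeback on the given range, waits for
 * it to complete and then reports any writeback error recorded against the
 * file, which is what provides the reliable error indication promised in the
 * comment above.
 */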

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}

/*
 * Prune the keys cached for writeback.  This takes vnode->wb_lock itself, so
 * the caller must not already hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
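
/*
 * Design note: unused keys are moved to a local "graveyard" list while
 * wb_lock is held and only actually released once the lock has been dropped,
 * keeping afs_put_wb_key() out of the spinlocked section.
 */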
335