// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level (buffered) writeback.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * To support network filesystems with local caching, we manage a situation
 * that can be envisioned like the following:
 *
 *               +---+---+-----+-----+---+----------+
 *    Folios:    |   |   |     |     |   |          |
 *               +---+---+-----+-----+---+----------+
 *
 *                 +------+------+     +----+----+
 *    Upload:      |      |      |.....|    |    |
 *  (Stream 0)     +------+------+     +----+----+
 *
 *               +------+------+------+------+------+
 *    Cache:     |      |      |      |      |      |
 *  (Stream 1)   +------+------+------+------+------+
 *
 * Where we have a sequence of folios of varying sizes that we need to overlay
 * with multiple parallel streams of I/O requests, where the I/O requests in a
 * stream may also be of various sizes (in cifs, for example, the sizes are
 * negotiated with the server; in something like ceph, they may represent the
 * sizes of storage objects).
 *
 * The sequence in each stream may contain gaps, and noncontiguous subrequests
 * may be glued together into single vectored write RPCs.
 */
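
/* In this file, "Stream 0" and "Stream 1" in the diagram above correspond to
 * wreq->io_streams[0] (upload to the server) and wreq->io_streams[1] (write
 * to the local cache); a folio may be spliced into either or both streams as
 * subrequests are constructed below.
 */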

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"

/*
 * Kill all dirty folios in the event of an unrecoverable error, starting with
 * a locked folio we've already obtained from writeback_iter().
 */
static void netfs_kill_dirty_pages(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct folio *folio)
{
	int error = 0;

	do {
		enum netfs_folio_trace why = netfs_folio_trace_kill;
		struct netfs_group *group = NULL;
		struct netfs_folio *finfo = NULL;
		void *priv;

		priv = folio_detach_private(folio);
		if (priv) {
			finfo = __netfs_folio_info(priv);
			if (finfo) {
				/* Kill folio from streaming write. */
				group = finfo->netfs_group;
				why = netfs_folio_trace_kill_s;
			} else {
				group = priv;
				if (group == NETFS_FOLIO_COPY_TO_CACHE) {
					/* Kill copy-to-cache folio */
					why = netfs_folio_trace_kill_cc;
					group = NULL;
				} else {
					/* Kill folio with group */
					why = netfs_folio_trace_kill_g;
				}
			}
		}

		trace_netfs_folio(folio, why);

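		/* Cycle the folio through the writeback state so that anyone
		 * waiting on it is woken; no I/O is actually issued.
		 */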
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);

		netfs_put_group(group);
		kfree(finfo);

	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
}

/*
 * Create a write request and set it up appropriately for the origin type.
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin)
{
	struct netfs_io_request *wreq;
	struct netfs_inode *ictx;

	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
	if (IS_ERR(wreq))
		return wreq;

	_enter("R=%x", wreq->debug_id);

	ictx = netfs_inode(wreq->inode);
	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));

	wreq->contiguity = wreq->start;
	wreq->cleaned_to = wreq->start;
	INIT_WORK(&wreq->work, netfs_write_collection_worker);

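	/* transferred is primed with LONG_MAX as a "nothing collected yet"
	 * sentinel for the write collector to whittle down.
	 */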
	wreq->io_streams[0].stream_nr		= 0;
	wreq->io_streams[0].source		= NETFS_UPLOAD_TO_SERVER;
	wreq->io_streams[0].prepare_write	= ictx->ops->prepare_write;
	wreq->io_streams[0].issue_write		= ictx->ops->issue_write;
	wreq->io_streams[0].collected_to	= start;
	wreq->io_streams[0].transferred		= LONG_MAX;

	wreq->io_streams[1].stream_nr		= 1;
	wreq->io_streams[1].source		= NETFS_WRITE_TO_CACHE;
	wreq->io_streams[1].collected_to	= start;
	wreq->io_streams[1].transferred		= LONG_MAX;
	if (fscache_resources_valid(&wreq->cache_resources)) {
		wreq->io_streams[1].avail	= true;
		wreq->io_streams[1].active	= true;
		wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
		wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
	}

	return wreq;
}

/**
 * netfs_prepare_write_failed - Note write preparation failed
 * @subreq: The subrequest to mark
 *
 * Mark a subrequest to note that preparation for write failed.
 */
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq)
{
	__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
	trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed);
}
EXPORT_SYMBOL(netfs_prepare_write_failed);
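
/* Example (a minimal sketch; the myfs_* names are hypothetical): a
 * filesystem's ->prepare_write hook might fail to obtain credits for the
 * subrequest and mark it failed, so that netfs_do_issue_write() terminates it
 * rather than issuing it:
 *
 *	static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
 *	{
 *		int ret = myfs_get_credits(subreq);	// hypothetical helper
 *
 *		if (ret < 0) {
 *			subreq->error = ret;
 *			netfs_prepare_write_failed(subreq);
 *		}
 *	}
 */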

/*
 * Prepare a write subrequest.  We need to allocate a new subrequest
 * if we don't have one.
 */
static void netfs_prepare_write(struct netfs_io_request *wreq,
				struct netfs_io_stream *stream,
				loff_t start)
{
	struct netfs_io_subrequest *subreq;

	subreq = netfs_alloc_subrequest(wreq);
	subreq->source		= stream->source;
	subreq->start		= start;
	subreq->max_len		= ULONG_MAX;
	subreq->max_nr_segs	= INT_MAX;
	subreq->stream_nr	= stream->stream_nr;

	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

	trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
			     refcount_read(&subreq->ref),
			     netfs_sreq_trace_new);

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	switch (stream->source) {
	case NETFS_UPLOAD_TO_SERVER:
		netfs_stat(&netfs_n_wh_upload);
		subreq->max_len = wreq->wsize;
		break;
	case NETFS_WRITE_TO_CACHE:
		netfs_stat(&netfs_n_wh_write);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (stream->prepare_write)
		stream->prepare_write(subreq);

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

	/* We add to the end of the list whilst the collector may be walking
	 * the list.  The collector only goes forwards and uses the lock to
	 * remove entries from the front.
	 */
	spin_lock(&wreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
		stream->front = subreq;
		if (!stream->active) {
			stream->collected_to = stream->front->start;
			/* Write list pointers before active flag */
			smp_store_release(&stream->active, true);
		}
	}

	spin_unlock(&wreq->lock);

	stream->construct = subreq;
}

/*
 * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O
 * operation.  The operation may be asynchronous and should call
 * netfs_write_subrequest_terminated() when complete.
 */
static void netfs_do_issue_write(struct netfs_io_stream *stream,
				 struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;

	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
		return netfs_write_subrequest_terminated(subreq, subreq->error, false);

	// TODO: Use encrypted buffer
	if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
		subreq->io_iter = wreq->io_iter;
		iov_iter_advance(&subreq->io_iter,
				 subreq->start + subreq->transferred - wreq->start);
		iov_iter_truncate(&subreq->io_iter,
				  subreq->len - subreq->transferred);
	} else {
		iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
				subreq->start + subreq->transferred,
				subreq->len   - subreq->transferred);
	}

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	stream->issue_write(subreq);
}
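
/* The ->issue_write hook is expected to consume subreq->io_iter and, when the
 * write completes (possibly asynchronously), to report the outcome with
 * netfs_write_subrequest_terminated().  A minimal synchronous sketch, with
 * myfs_* names hypothetical:
 *
 *	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
 *	{
 *		ssize_t ret = myfs_send_data(subreq);	// hypothetical transport call
 *
 *		netfs_write_subrequest_terminated(subreq, ret, false);
 *	}
 */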

void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq)
{
	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_do_issue_write(stream, subreq);
}

static void netfs_issue_write(struct netfs_io_request *wreq,
			      struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq = stream->construct;

	if (!subreq)
		return;
	stream->construct = NULL;

	if (subreq->start + subreq->len > wreq->start + wreq->submitted)
		WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
	netfs_do_issue_write(stream, subreq);
}

/*
 * Add data to the write subrequest, dispatching each as we fill it up or as
 * soon as it is discontiguous with the previous one.  We only fill one part
 * at a time so that we can avoid overrunning the credits obtained (cifs) and
 * try to parallelise content-crypto preparation with network writes.
 */
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof)
{
	struct netfs_io_subrequest *subreq = stream->construct;
	size_t part;

	if (!stream->avail) {
		_leave("no write");
		return len;
	}

	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);

	if (subreq && start != subreq->start + subreq->len) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	if (!stream->construct)
		netfs_prepare_write(wreq, stream, start);
	subreq = stream->construct;

	part = min(subreq->max_len - subreq->len, len);
	_debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
	subreq->len += part;
	subreq->nr_segs++;

	if (subreq->len >= subreq->max_len ||
	    subreq->nr_segs >= subreq->max_nr_segs ||
	    to_eof) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	return part;
}
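
/* Callers typically loop, calling netfs_advance_write() until the region has
 * been consumed; see netfs_unbuffered_write() below for the pattern.  Note
 * that if the stream is unavailable, the whole length is returned so that the
 * caller simply skips over the region.
 */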

/*
 * Write some of a pending folio's data back to the server.
 */
static int netfs_write_folio(struct netfs_io_request *wreq,
			     struct writeback_control *wbc,
			     struct folio *folio)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	struct netfs_io_stream *stream;
	struct netfs_group *fgroup; /* TODO: Use this with ceph */
	struct netfs_folio *finfo;
	size_t fsize = folio_size(folio), flen = fsize, foff = 0;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false, streamw = false;
	bool debug = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_start_writeback(folio);
		folio_unlock(folio);
		wreq->nr_group_rel += netfs_folio_written_back(folio);
		netfs_put_group_many(wreq->group, wreq->nr_group_rel);
		wreq->nr_group_rel = 0;
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	fgroup = netfs_folio_group(folio);
	finfo = netfs_folio_info(folio);
	if (finfo) {
		foff = finfo->dirty_offset;
		flen = foff + finfo->dirty_len;
		streamw = true;
	}

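	/* Work out how much of the folio to write and whether it reaches the
	 * current EOF (to_eof is later passed to netfs_advance_write() to
	 * force the subrequest under construction to be issued rather than
	 * extended).
	 */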
	if (wreq->origin == NETFS_WRITETHROUGH) {
		to_eof = false;
		if (flen > i_size - fpos)
			flen = i_size - fpos;
	} else if (flen > i_size - fpos) {
		flen = i_size - fpos;
		if (!streamw)
			folio_zero_segment(folio, flen, fsize);
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}
	flen -= foff;

	_debug("folio %zx %zx %zx", foff, flen, fsize);

	/* Deal with discontinuities in the stream of dirty pages.  These can
	 * arise from a number of sources:
	 *
	 * (1) Intervening non-dirty pages from random-access writes, multiple
	 *     flushers writing back different parts simultaneously and manual
	 *     syncing.
	 *
	 * (2) Partially-written pages from write-streaming.
	 *
	 * (3) Pages that belong to a different write-back group (eg.  Ceph
	 *     snapshots).
	 *
	 * (4) Actually-clean pages that were marked for write to the cache
	 *     when they were read.  Note that these appear as a special
	 *     write-back group.
	 */
	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		netfs_issue_write(wreq, upload);
	} else if (fgroup != wreq->group) {
		/* We can't write this page to the server yet. */
		kdebug("wrong group");
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		netfs_issue_write(wreq, upload);
		netfs_issue_write(wreq, cache);
		return 0;
	}

	if (foff > 0)
		netfs_issue_write(wreq, upload);
	if (streamw)
		netfs_issue_write(wreq, cache);

	/* Flip the page to the writeback state and unlock.  If we're called
	 * from write-through, then the page has already been put into the wb
	 * state.
	 */
	if (wreq->origin == NETFS_WRITEBACK)
		folio_start_writeback(folio);
	folio_unlock(folio);

	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		if (!fscache_resources_valid(&wreq->cache_resources)) {
			trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
			netfs_issue_write(wreq, upload);
			netfs_folio_written_back(folio);
			return 0;
		}
		trace_netfs_folio(folio, netfs_folio_trace_store_copy);
	} else if (!upload->construct) {
		trace_netfs_folio(folio, netfs_folio_trace_store);
	} else {
		trace_netfs_folio(folio, netfs_folio_trace_store_plus);
	}

	/* Move the submission point forward to allow for write-streaming data
	 * not starting at the front of the page.  We don't do write-streaming
	 * with the cache as the cache requires DIO alignment.
	 *
	 * Also skip uploading for data that's been read and just needs copying
	 * to the cache.
	 */
	for (int s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		stream->submit_max_len = fsize;
		stream->submit_off = foff;
		stream->submit_len = flen;
		if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
		    (stream->source == NETFS_UPLOAD_TO_SERVER &&
		     fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
			stream->submit_off = UINT_MAX;
			stream->submit_len = 0;
			stream->submit_max_len = 0;
		}
	}

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	for (;;) {
		ssize_t part;
		size_t lowest_off = ULONG_MAX;
		int choose_s = -1;

		/* Always add to the lowest-submitted stream first. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			stream = &wreq->io_streams[s];
			if (stream->submit_len > 0 &&
			    stream->submit_off < lowest_off) {
				lowest_off = stream->submit_off;
				choose_s = s;
			}
		}

		if (choose_s < 0)
			break;
		stream = &wreq->io_streams[choose_s];

		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		stream->submit_off += part;
		stream->submit_max_len -= part;
		if (part > stream->submit_len)
			stream->submit_len = 0;
		else
			stream->submit_len -= part;
		if (part > 0)
			debug = true;
	}

	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (!debug)
		kdebug("R=%x: No submit", wreq->debug_id);

	if (foff + flen < fsize)
		for (int s = 0; s < NR_IO_STREAMS; s++)
			netfs_issue_write(wreq, &wreq->io_streams[s]);

	_leave(" = 0");
	return 0;
}

/*
 * Write some of the pending data back to the server.
 */
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);
	struct netfs_io_request *wreq = NULL;
	struct folio *folio;
	int error = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		mutex_lock(&ictx->wb_lock);
	else if (!mutex_trylock(&ictx->wb_lock))
		return 0;

	/* Need the first folio to be able to set up the op. */
	folio = writeback_iter(mapping, wbc, NULL, &error);
	if (!folio)
		goto out;

	wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
	if (IS_ERR(wreq)) {
		error = PTR_ERR(wreq);
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_writeback);
	netfs_stat(&netfs_n_wh_writepages);

	do {
		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);

		/* It appears we don't have to handle cyclic writeback wrapping. */
		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);

		if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
			wreq->netfs_ops->begin_writeback(wreq);
		}

		error = netfs_write_folio(wreq, wbc, folio);
		if (error < 0)
			break;
	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));

	for (int s = 0; s < NR_IO_STREAMS; s++)
		netfs_issue_write(wreq, &wreq->io_streams[s]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
	return error;

couldnt_start:
	netfs_kill_dirty_pages(mapping, wbc, folio);
out:
	mutex_unlock(&ictx->wb_lock);
	_leave(" = %d", error);
	return error;
}
EXPORT_SYMBOL(netfs_writepages);
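
/* A network filesystem typically points its address_space_operations at this
 * directly.  A sketch (myfs_aops is hypothetical; cf. what afs and 9p do):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.writepages	= netfs_writepages,
 *		.dirty_folio	= netfs_dirty_folio,
 *		...
 *	};
 */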

/*
 * Begin a write operation for writing through the pagecache.
 */
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
{
	struct netfs_io_request *wreq = NULL;
	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));

	mutex_lock(&ictx->wb_lock);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
				      iocb->ki_pos, NETFS_WRITETHROUGH);
	if (IS_ERR(wreq)) {
		mutex_unlock(&ictx->wb_lock);
		return wreq;
	}

	wreq->io_streams[0].avail = true;
	trace_netfs_write(wreq, netfs_write_trace_writethrough);
	return wreq;
}
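
/* Note that ictx->wb_lock is deliberately left held on success here; it is
 * dropped by netfs_end_writethrough() once all the data has been added and
 * the request queued.
 */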

/*
 * Advance the state of the write operation used when writing through the
 * pagecache.  Data has been copied into the pagecache that we need to append
 * to the request.  If we've added more than wsize then we need to create a new
 * subrequest.
 */
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache)
{
	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);

	if (!*writethrough_cache) {
		if (folio_test_dirty(folio))
			/* Sigh.  mmap. */
			folio_clear_dirty_for_io(folio);

		/* We can make multiple writes to the folio... */
		folio_start_writeback(folio);
		if (wreq->len == 0)
			trace_netfs_folio(folio, netfs_folio_trace_wthru);
		else
			trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
		*writethrough_cache = folio;
	}

	wreq->len += copied;
	if (!to_page_end)
		return 0;

	*writethrough_cache = NULL;
	return netfs_write_folio(wreq, wbc, folio);
}

/*
 * End a write operation used when writing through the pagecache.
 */
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache)
{
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	int ret;

	_enter("R=%x", wreq->debug_id);

	if (writethrough_cache)
		netfs_write_folio(wreq, wbc, writethrough_cache);

	netfs_issue_write(wreq, &wreq->io_streams[0]);
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

	if (wreq->iocb) {
		ret = -EIOCBQUEUED;
	} else {
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
		ret = wreq->error;
	}
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}

/*
 * Write data to the server without going through the pagecache and without
 * writing it to the local cache.
 */
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	ssize_t part;
	loff_t start = wreq->start;
	int error = 0;

	_enter("%zx", len);

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_begin(wreq->inode);

	while (len) {
		// TODO: Prepare content encryption

		_debug("unbuffered %zx", len);
		part = netfs_advance_write(wreq, upload, start, len, false);
		start += part;
		len -= part;
		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
			wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
		}
		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
			break;
	}

	netfs_issue_write(wreq, upload);

	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
	if (list_empty(&upload->subrequests))
		netfs_wake_write_collector(wreq, false);

	_leave(" = %d", error);
	return error;
}
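
/* Note that the return value above only reflects the issuing loop (and is
 * currently always 0); the terminal status of the write is gathered by the
 * collector and picked up from wreq->error by the caller once
 * NETFS_RREQ_IN_PROGRESS clears.
 */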
691