// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level (buffered) writeback.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * To support network filesystems with local caching, we manage a situation
 * that can be envisioned as follows:
 *
 *                     +---+---+-----+-----+---+----------+
 *     Folios:         |   |   |     |     |   |          |
 *                     +---+---+-----+-----+---+----------+
 *
 *                     +------+------+     +----+----+
 *     Upload:         |      |      |.....|    |    |
 *     (Stream 0)      +------+------+     +----+----+
 *
 *                     +------+------+------+------+------+
 *     Cache:          |      |      |      |      |      |
 *     (Stream 1)      +------+------+------+------+------+
 *
 * Where we have a sequence of folios of varying sizes that we need to overlay
 * with multiple parallel streams of I/O requests, where the I/O requests in a
 * stream may also be of various sizes (in cifs, for example, the sizes are
 * negotiated with the server; in something like ceph, they may represent the
 * sizes of storage objects).
 *
 * The sequence in each stream may contain gaps, and noncontiguous subrequests
 * may be glued together into single vectored write RPCs.
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include "internal.h"

/*
 * Kill all dirty folios in the event of an unrecoverable error, starting with
 * a locked folio we've already obtained from writeback_iter().
 */
static void netfs_kill_dirty_pages(struct address_space *mapping,
				   struct writeback_control *wbc,
				   struct folio *folio)
{
	int error = 0;

	do {
		enum netfs_folio_trace why = netfs_folio_trace_kill;
		struct netfs_group *group = NULL;
		struct netfs_folio *finfo = NULL;
		void *priv;

		priv = folio_detach_private(folio);
		if (priv) {
			finfo = __netfs_folio_info(priv);
			if (finfo) {
				/* Kill folio from streaming write. */
				group = finfo->netfs_group;
				why = netfs_folio_trace_kill_s;
			} else {
				group = priv;
				if (group == NETFS_FOLIO_COPY_TO_CACHE) {
					/* Kill copy-to-cache folio */
					why = netfs_folio_trace_kill_cc;
					group = NULL;
				} else {
					/* Kill folio with group */
					why = netfs_folio_trace_kill_g;
				}
			}
		}

		trace_netfs_folio(folio, why);

		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);

		netfs_put_group(group);
		kfree(finfo);

	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
}
/*
 * Create a write request and set it up appropriately for the origin type.
 */
struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
						struct file *file,
						loff_t start,
						enum netfs_io_origin origin)
{
	struct netfs_io_request *wreq;
	struct netfs_inode *ictx;
	bool is_buffered = (origin == NETFS_WRITEBACK ||
			    origin == NETFS_WRITETHROUGH ||
			    origin == NETFS_PGPRIV2_COPY_TO_CACHE);

	wreq = netfs_alloc_request(mapping, file, start, 0, origin);
	if (IS_ERR(wreq))
		return wreq;

	_enter("R=%x", wreq->debug_id);

	ictx = netfs_inode(wreq->inode);
	if (is_buffered && netfs_is_cache_enabled(ictx))
		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));

	wreq->cleaned_to = wreq->start;

	wreq->io_streams[0].stream_nr = 0;
	wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;
	wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
	wreq->io_streams[0].issue_write = ictx->ops->issue_write;
	wreq->io_streams[0].collected_to = start;
	wreq->io_streams[0].transferred = LONG_MAX;

	wreq->io_streams[1].stream_nr = 1;
	wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
	wreq->io_streams[1].collected_to = start;
	wreq->io_streams[1].transferred = LONG_MAX;
	if (fscache_resources_valid(&wreq->cache_resources)) {
		wreq->io_streams[1].avail = true;
		wreq->io_streams[1].active = true;
		wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq;
		wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write;
	}

	return wreq;
}

/**
 * netfs_prepare_write_failed - Note write preparation failed
 * @subreq: The subrequest to mark
 *
 * Mark a subrequest to note that preparation for write failed.
 */
void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq)
{
	__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
	trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed);
}
EXPORT_SYMBOL(netfs_prepare_write_failed);
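
/*
 * Illustrative sketch only (not part of netfslib): roughly what the upload
 * stream's hooks wired up above might look like in a network filesystem.
 * The "myfs" names, struct myfs_server, myfs_get_credits() and
 * myfs_send_write_rpc() are assumptions for the example, not real APIs;
 * netfs_prepare_write_failed() and netfs_write_subrequest_terminated() are
 * the real netfslib entry points shown in this file.
 */
#if 0
static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_stream *stream =
		&subreq->rreq->io_streams[subreq->stream_nr];
	struct myfs_server *server = myfs_server(subreq->rreq->inode);

	/* Clamp each subrequest to the transfer size the server negotiated. */
	stream->sreq_max_len = umin(stream->sreq_max_len,
				    server->negotiated_wsize);
	if (myfs_get_credits(server, subreq) < 0)
		netfs_prepare_write_failed(subreq);
}

static void myfs_issue_write(struct netfs_io_subrequest *subreq)
{
	int err = myfs_send_write_rpc(subreq);

	/* Completion may be asynchronous; the transport would call
	 * netfs_write_subrequest_terminated() when the RPC finishes.
	 */
	if (err < 0)
		netfs_write_subrequest_terminated(subreq, err, false);
}
#endif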

/*
 * Prepare a write subrequest.  We need to allocate a new subrequest
 * if we don't have one.
 */
static void netfs_prepare_write(struct netfs_io_request *wreq,
				struct netfs_io_stream *stream,
				loff_t start)
{
	struct netfs_io_subrequest *subreq;

	subreq = netfs_alloc_subrequest(wreq);
	subreq->source = stream->source;
	subreq->start = start;
	subreq->stream_nr = stream->stream_nr;
	subreq->io_iter = wreq->io_iter;

	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);

	trace_netfs_sreq(subreq, netfs_sreq_trace_prepare);

	stream->sreq_max_len = UINT_MAX;
	stream->sreq_max_segs = INT_MAX;
	switch (stream->source) {
	case NETFS_UPLOAD_TO_SERVER:
		netfs_stat(&netfs_n_wh_upload);
		stream->sreq_max_len = wreq->wsize;
		break;
	case NETFS_WRITE_TO_CACHE:
		netfs_stat(&netfs_n_wh_write);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (stream->prepare_write)
		stream->prepare_write(subreq);

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);

	/* We add to the end of the list whilst the collector may be walking
	 * the list.  The collector only goes forwards and uses the lock to
	 * remove entries off of the front.
	 */
	spin_lock_bh(&wreq->lock);
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
		stream->front = subreq;
		if (!stream->active) {
			stream->collected_to = stream->front->start;
			/* Write list pointers before active flag */
			smp_store_release(&stream->active, true);
		}
	}
	spin_unlock_bh(&wreq->lock);

	stream->construct = subreq;
}

/*
 * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O
 * operation.  The operation may be asynchronous and should call
 * netfs_write_subrequest_terminated() when complete.
 */
static void netfs_do_issue_write(struct netfs_io_stream *stream,
				 struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *wreq = subreq->rreq;

	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);

	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
		return netfs_write_subrequest_terminated(subreq, subreq->error, false);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	stream->issue_write(subreq);
}

void netfs_reissue_write(struct netfs_io_stream *stream,
			 struct netfs_io_subrequest *subreq,
			 struct iov_iter *source)
{
	size_t size = subreq->len - subreq->transferred;

	// TODO: Use encrypted buffer
	subreq->io_iter = *source;
	iov_iter_advance(source, size);
	iov_iter_truncate(&subreq->io_iter, size);

	__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
	netfs_do_issue_write(stream, subreq);
}

void netfs_issue_write(struct netfs_io_request *wreq,
		       struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq = stream->construct;

	if (!subreq)
		return;
	stream->construct = NULL;
	subreq->io_iter.count = subreq->len;
	netfs_do_issue_write(stream, subreq);
}

/*
 * Add data to the write subrequest, dispatching each as we fill it up or if
 * it is discontiguous with the previous one.  We only fill one part at a time
 * so that we can avoid overrunning the credits obtained (cifs) and try to
 * parallelise content-crypto preparation with network writes.
 */
int netfs_advance_write(struct netfs_io_request *wreq,
			struct netfs_io_stream *stream,
			loff_t start, size_t len, bool to_eof)
{
	struct netfs_io_subrequest *subreq = stream->construct;
	size_t part;

	if (!stream->avail) {
		_leave("no write");
		return len;
	}

	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);

	if (subreq && start != subreq->start + subreq->len) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	if (!stream->construct)
		netfs_prepare_write(wreq, stream, start);
	subreq = stream->construct;

	part = umin(stream->sreq_max_len - subreq->len, len);
	_debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len);
	subreq->len += part;
	subreq->nr_segs++;
	stream->submit_extendable_to -= part;

	if (subreq->len >= stream->sreq_max_len ||
	    subreq->nr_segs >= stream->sreq_max_segs ||
	    to_eof) {
		netfs_issue_write(wreq, stream);
		subreq = NULL;
	}

	return part;
}
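
/*
 * Illustrative sketch only: how a caller drives netfs_advance_write() over a
 * dirty span.  netfs_write_folio() below and netfs_unbuffered_write() at the
 * bottom of this file do effectively this per stream.  example_write_span()
 * is hypothetical, not a netfslib API.
 */
#if 0
static void example_write_span(struct netfs_io_request *wreq,
			       struct netfs_io_stream *stream,
			       loff_t start, size_t len)
{
	while (len) {
		/* Each pass adds at most one sreq_max_len-limited part and
		 * issues the subrequest as soon as it fills up, so a later
		 * part can be prepared whilst earlier ones are in flight.
		 */
		size_t part = netfs_advance_write(wreq, stream, start, len,
						  false);

		start += part;
		len -= part;
	}
	netfs_issue_write(wreq, stream);	/* Flush any final partial part */
}
#endif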

/*
 * Write some of a pending folio's data back to the server.
 */
static int netfs_write_folio(struct netfs_io_request *wreq,
			     struct writeback_control *wbc,
			     struct folio *folio)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	struct netfs_io_stream *stream;
	struct netfs_group *fgroup; /* TODO: Use this with ceph */
	struct netfs_folio *finfo;
	size_t iter_off = 0;
	size_t fsize = folio_size(folio), flen = fsize, foff = 0;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false, streamw = false;
	bool debug = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_start_writeback(folio);
		folio_unlock(folio);
		wreq->nr_group_rel += netfs_folio_written_back(folio);
		netfs_put_group_many(wreq->group, wreq->nr_group_rel);
		wreq->nr_group_rel = 0;
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	fgroup = netfs_folio_group(folio);
	finfo = netfs_folio_info(folio);
	if (finfo) {
		foff = finfo->dirty_offset;
		flen = foff + finfo->dirty_len;
		streamw = true;
	}

	if (wreq->origin == NETFS_WRITETHROUGH) {
		to_eof = false;
		if (flen > i_size - fpos)
			flen = i_size - fpos;
	} else if (flen > i_size - fpos) {
		flen = i_size - fpos;
		if (!streamw)
			folio_zero_segment(folio, flen, fsize);
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}
	flen -= foff;

	_debug("folio %zx %zx %zx", foff, flen, fsize);

	/* Deal with discontinuities in the stream of dirty pages.  These can
	 * arise from a number of sources:
	 *
	 * (1) Intervening non-dirty pages from random-access writes, multiple
	 *     flushers writing back different parts simultaneously and manual
	 *     syncing.
	 *
	 * (2) Partially-written pages from write-streaming.
	 *
	 * (3) Pages that belong to a different write-back group (eg. Ceph
	 *     snapshots).
	 *
	 * (4) Actually-clean pages that were marked for write to the cache
	 *     when they were read.  Note that these appear as a special
	 *     write-back group.
	 */
	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		netfs_issue_write(wreq, upload);
	} else if (fgroup != wreq->group) {
		/* We can't write this page to the server yet. */
		kdebug("wrong group");
		folio_redirty_for_writepage(wbc, folio);
		folio_unlock(folio);
		netfs_issue_write(wreq, upload);
		netfs_issue_write(wreq, cache);
		return 0;
	}

	if (foff > 0)
		netfs_issue_write(wreq, upload);
	if (streamw)
		netfs_issue_write(wreq, cache);

	/* Flip the page to the writeback state and unlock.  If we're called
	 * from write-through, then the page has already been put into the wb
	 * state.
	 */
	if (wreq->origin == NETFS_WRITEBACK)
		folio_start_writeback(folio);
	folio_unlock(folio);

	if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) {
		if (!cache->avail) {
			trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
			netfs_issue_write(wreq, upload);
			netfs_folio_written_back(folio);
			return 0;
		}
		trace_netfs_folio(folio, netfs_folio_trace_store_copy);
	} else if (!upload->avail && !cache->avail) {
		trace_netfs_folio(folio, netfs_folio_trace_cancel_store);
		netfs_folio_written_back(folio);
		return 0;
	} else if (!upload->construct) {
		trace_netfs_folio(folio, netfs_folio_trace_store);
	} else {
		trace_netfs_folio(folio, netfs_folio_trace_store_plus);
	}

	/* Attach the folio to the rolling buffer. */
	netfs_buffer_append_folio(wreq, folio, false);

	/* Move the submission point forward to allow for write-streaming data
	 * not starting at the front of the page.  We don't do write-streaming
	 * with the cache as the cache requires DIO alignment.
	 *
	 * Also skip uploading for data that's been read and just needs copying
	 * to the cache.
	 */
	for (int s = 0; s < NR_IO_STREAMS; s++) {
		stream = &wreq->io_streams[s];
		stream->submit_off = foff;
		stream->submit_len = flen;
		if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
		    (stream->source == NETFS_UPLOAD_TO_SERVER &&
		     fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
			stream->submit_off = UINT_MAX;
			stream->submit_len = 0;
		}
	}

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	for (;;) {
		ssize_t part;
		size_t lowest_off = ULONG_MAX;
		int choose_s = -1;

		/* Always add to the lowest-submitted stream first. */
		for (int s = 0; s < NR_IO_STREAMS; s++) {
			stream = &wreq->io_streams[s];
			if (stream->submit_len > 0 &&
			    stream->submit_off < lowest_off) {
				lowest_off = stream->submit_off;
				choose_s = s;
			}
		}

		if (choose_s < 0)
			break;
		stream = &wreq->io_streams[choose_s];

		/* Advance the iterator(s). */
		if (stream->submit_off > iter_off) {
			iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
			iter_off = stream->submit_off;
		}

		atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
		stream->submit_extendable_to = fsize - stream->submit_off;
		part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
					   stream->submit_len, to_eof);
		stream->submit_off += part;
		if (part > stream->submit_len)
			stream->submit_len = 0;
		else
			stream->submit_len -= part;
		if (part > 0)
			debug = true;
	}

	if (fsize > iter_off)
		iov_iter_advance(&wreq->io_iter, fsize - iter_off);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (!debug)
		kdebug("R=%x: No submit", wreq->debug_id);

	if (foff + flen < fsize)
		for (int s = 0; s < NR_IO_STREAMS; s++)
			netfs_issue_write(wreq, &wreq->io_streams[s]);

	_leave(" = 0");
	return 0;
}

/*
 * Write some of the pending data back to the server.
 */
int netfs_writepages(struct address_space *mapping,
		     struct writeback_control *wbc)
{
	struct netfs_inode *ictx = netfs_inode(mapping->host);
	struct netfs_io_request *wreq = NULL;
	struct folio *folio;
	int error = 0;

	if (!mutex_trylock(&ictx->wb_lock)) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			netfs_stat(&netfs_n_wb_lock_skip);
			return 0;
		}
		netfs_stat(&netfs_n_wb_lock_wait);
		mutex_lock(&ictx->wb_lock);
	}

	/* Need the first folio to be able to set up the op. */
	folio = writeback_iter(mapping, wbc, NULL, &error);
	if (!folio)
		goto out;

	wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK);
	if (IS_ERR(wreq)) {
		error = PTR_ERR(wreq);
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_writeback);
	netfs_stat(&netfs_n_wh_writepages);

	do {
		_debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));

		/* It appears we don't have to handle cyclic writeback wrapping. */
		WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to));

		if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
		    unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
			set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
			wreq->netfs_ops->begin_writeback(wreq);
		}

		error = netfs_write_folio(wreq, wbc, folio);
		if (error < 0)
			break;
	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));

	for (int s = 0; s < NR_IO_STREAMS; s++)
		netfs_issue_write(wreq, &wreq->io_streams[s]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
	return error;

couldnt_start:
	netfs_kill_dirty_pages(mapping, wbc, folio);
out:
	mutex_unlock(&ictx->wb_lock);
	_leave(" = %d", error);
	return error;
}
EXPORT_SYMBOL(netfs_writepages);
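
/*
 * Illustrative sketch only: a network filesystem typically points its
 * address_space ->writepages at netfs_writepages() and lets netfslib drive
 * both I/O streams (cf. the hook-ups in fs/afs/file.c).  The "myfs" name is
 * hypothetical; the netfs_* and filemap_* helpers are real.
 */
#if 0
static const struct address_space_operations myfs_aops = {
	.dirty_folio		= netfs_dirty_folio,
	.release_folio		= netfs_release_folio,
	.invalidate_folio	= netfs_invalidate_folio,
	.writepages		= netfs_writepages,
	.migrate_folio		= filemap_migrate_folio,
};
#endif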

/*
 * Begin a write operation for writing through the pagecache.
 */
struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
{
	struct netfs_io_request *wreq = NULL;
	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));

	mutex_lock(&ictx->wb_lock);

	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp,
				      iocb->ki_pos, NETFS_WRITETHROUGH);
	if (IS_ERR(wreq)) {
		mutex_unlock(&ictx->wb_lock);
		return wreq;
	}

	wreq->io_streams[0].avail = true;
	trace_netfs_write(wreq, netfs_write_trace_writethrough);
	return wreq;
}

/*
 * Advance the state of the write operation used when writing through the
 * pagecache.  Data has been copied into the pagecache that we need to append
 * to the request.  If we've added more than wsize then we need to create a new
 * subrequest.
 */
int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			       struct folio *folio, size_t copied, bool to_page_end,
			       struct folio **writethrough_cache)
{
	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);

	if (!*writethrough_cache) {
		if (folio_test_dirty(folio))
			/* Sigh.  mmap. */
			folio_clear_dirty_for_io(folio);

		/* We can make multiple writes to the folio... */
		folio_start_writeback(folio);
		if (wreq->len == 0)
			trace_netfs_folio(folio, netfs_folio_trace_wthru);
		else
			trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
		*writethrough_cache = folio;
	}

	wreq->len += copied;
	if (!to_page_end)
		return 0;

	*writethrough_cache = NULL;
	return netfs_write_folio(wreq, wbc, folio);
}

/*
 * End a write operation used when writing through the pagecache.
 */
int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
			   struct folio *writethrough_cache)
{
	struct netfs_inode *ictx = netfs_inode(wreq->inode);
	int ret;

	_enter("R=%x", wreq->debug_id);

	if (writethrough_cache)
		netfs_write_folio(wreq, wbc, writethrough_cache);

	netfs_issue_write(wreq, &wreq->io_streams[0]);
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	mutex_unlock(&ictx->wb_lock);

	if (wreq->iocb) {
		ret = -EIOCBQUEUED;
	} else {
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
		ret = wreq->error;
	}
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	return ret;
}
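
/*
 * Illustrative sketch only: the write-through sequence as driven by the
 * buffered write path (cf. netfs_perform_write()) - begin once, advance as
 * each chunk is copied into the pagecache, then end and collect the result.
 * example_writethrough() is hypothetical and elides the copy loop and error
 * handling.
 */
#if 0
static ssize_t example_writethrough(struct kiocb *iocb,
				    struct writeback_control *wbc,
				    struct folio *folio, size_t copied)
{
	struct netfs_io_request *wreq;
	struct folio *wbcache = NULL;

	wreq = netfs_begin_writethrough(iocb, copied);
	if (IS_ERR(wreq))
		return PTR_ERR(wreq);

	/* Called once per chunk copied into the folio; passing true for
	 * to_page_end flushes the folio to netfs_write_folio().
	 */
	netfs_advance_writethrough(wreq, wbc, folio, copied, true, &wbcache);

	return netfs_end_writethrough(wreq, wbc, wbcache);
}
#endif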

/*
 * Write data to the server without going through the pagecache and without
 * writing it to the local cache.
 */
int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
{
	struct netfs_io_stream *upload = &wreq->io_streams[0];
	ssize_t part;
	loff_t start = wreq->start;
	int error = 0;

	_enter("%zx", len);

	if (wreq->origin == NETFS_DIO_WRITE)
		inode_dio_begin(wreq->inode);

	while (len) {
		// TODO: Prepare content encryption

		_debug("unbuffered %zx", len);
		part = netfs_advance_write(wreq, upload, start, len, false);
		start += part;
		len -= part;
		iov_iter_advance(&wreq->io_iter, part);
		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
			wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
		}
		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
			break;
	}

	netfs_issue_write(wreq, upload);

	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
	if (list_empty(&upload->subrequests))
		netfs_wake_write_collector(wreq, false);

	_leave(" = %d", error);
	return error;
}
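
/*
 * Illustrative sketch only: roughly how the DIO path drives the function
 * above (cf. netfs_unbuffered_write_iter()).  example_dio_write() is
 * hypothetical; allocation and teardown of the request are elided, and the
 * synchronous wait shown mirrors the one in netfs_end_writethrough() above.
 */
#if 0
static ssize_t example_dio_write(struct netfs_io_request *wreq,
				 struct kiocb *iocb)
{
	ssize_t ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb),
					     wreq->len);

	if (ret < 0)
		return ret;
	if (is_sync_kiocb(iocb)) {
		/* Synchronous DIO: wait for the collector to finish. */
		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
			    TASK_UNINTERRUPTIBLE);
		ret = wreq->error ?: wreq->transferred;
	}
	return ret;
}
#endif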