// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest result collection, assessment and
 * retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * Clear the unread part of an I/O subrequest's buffer.
 */
static void netfs_clear_unread(struct netfs_io_subrequest *subreq)
{
	netfs_reset_iter(subreq);
	WARN_ON_ONCE(subreq->len - subreq->transferred != iov_iter_count(&subreq->io_iter));
	iov_iter_zero(iov_iter_count(&subreq->io_iter), &subreq->io_iter);
	if (subreq->start + subreq->transferred >= subreq->rreq->i_size)
		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
}

/*
 * Flush, mark and unlock a folio that's now completely read.  If we want to
 * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it
 * dirty and let writeback handle it.
 */
static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
				    struct netfs_io_request *rreq,
				    struct folio_queue *folioq,
				    int slot)
{
	struct netfs_folio *finfo;
	struct folio *folio = folioq_folio(folioq, slot);

	flush_dcache_folio(folio);
	folio_mark_uptodate(folio);

	if (!test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
		finfo = netfs_folio_info(folio);
		if (finfo) {
			trace_netfs_folio(folio, netfs_folio_trace_filled_gaps);
			if (finfo->netfs_group)
				folio_change_private(folio, finfo->netfs_group);
			else
				folio_detach_private(folio);
			kfree(finfo);
		}

		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
			if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
				trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
				folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
				folio_mark_dirty(folio);
			}
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_done);
		}
	} else {
		// TODO: Use of PG_private_2 is deprecated.
		if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags))
			netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
	}

	if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
		if (folio->index == rreq->no_unlock_folio &&
		    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) {
			_debug("no unlock");
		} else {
			trace_netfs_folio(folio, netfs_folio_trace_read_unlock);
			folio_unlock(folio);
		}
	}
}
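
/*
 * Donation scheme, in outline (a summary of the logic below): a folio may be
 * covered by more than one subrequest, but only one of them may unlock it.
 * When a subrequest's data stops partway through a folio, or starts partway
 * through one, it "donates" those bytes to a neighbouring subrequest (via
 * ->prev_donated/->next_donated) and, if nothing is left, removes itself
 * from the list.  The recipient counts the donated bytes as its own when
 * deciding whether a folio is completely read and can be unlocked.  A
 * donation past the last subrequest in the list is parked in
 * rreq->prev_donated for a deferred next subrequest.
 *
 * For example, if a 16KiB folio is read by two 8KiB subrequests A and B and
 * B completes first, B donates its 8KiB back to A (via A's ->next_donated)
 * and is removed; when A completes, it absorbs the donation, sees that the
 * whole folio is now read, unlocks it and is removed in turn.
 */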
/*
 * Unlock any folios that are now completely read.  Returns true if the
 * subrequest is removed from the list.
 */
static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
{
	struct netfs_io_subrequest *prev, *next;
	struct netfs_io_request *rreq = subreq->rreq;
	struct folio_queue *folioq = subreq->curr_folioq;
	size_t avail, prev_donated, next_donated, fsize, part, excess;
	loff_t fpos, start, fend;
	int slot = subreq->curr_folioq_slot;

	if (WARN(subreq->transferred > subreq->len,
		 "Subreq overread: R%x[%x] %zu > %zu",
		 rreq->debug_id, subreq->debug_index,
		 subreq->transferred, subreq->len))
		subreq->transferred = subreq->len;

next_folio:
	fsize = PAGE_SIZE << subreq->curr_folio_order;
	fpos = round_down(subreq->start + subreq->consumed, fsize);
	fend = fpos + fsize;

	if (WARN_ON_ONCE(!folioq) ||
	    WARN_ON_ONCE(!folioq_folio(folioq, slot)) ||
	    WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) {
		pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n",
		       rreq->debug_id, subreq->debug_index,
		       subreq->start, subreq->start + subreq->transferred - 1,
		       subreq->consumed, subreq->transferred, subreq->len,
		       slot);
		if (folioq) {
			struct folio *folio = folioq_folio(folioq, slot);

			pr_err("folioq: orders=%02x%02x%02x%02x\n",
			       folioq->orders[0], folioq->orders[1],
			       folioq->orders[2], folioq->orders[3]);
			if (folio)
				pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n",
				       fpos, fend - 1, folio_pos(folio), folio_order(folio),
				       folioq_folio_order(folioq, slot));
		}
	}

donation_changed:
	/* Try to consume the current folio if we've hit or passed the end of
	 * it.  There's a possibility that this subreq doesn't start at the
	 * beginning of the folio, in which case we need to donate to/from the
	 * preceding subreq.
	 *
	 * We also need to include any potential donation back from the
	 * following subreq.
	 */
	prev_donated = READ_ONCE(subreq->prev_donated);
	next_donated = READ_ONCE(subreq->next_donated);
	if (prev_donated || next_donated) {
		spin_lock_bh(&rreq->lock);
		prev_donated = subreq->prev_donated;
		next_donated = subreq->next_donated;
		subreq->start -= prev_donated;
		subreq->len += prev_donated;
		subreq->transferred += prev_donated;
		prev_donated = subreq->prev_donated = 0;
		if (subreq->transferred == subreq->len) {
			subreq->len += next_donated;
			subreq->transferred += next_donated;
			next_donated = subreq->next_donated = 0;
		}
		trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
		spin_unlock_bh(&rreq->lock);
	}

	avail = subreq->transferred;
	if (avail == subreq->len)
		avail += next_donated;
	start = subreq->start;
	if (subreq->consumed == 0) {
		start -= prev_donated;
		avail += prev_donated;
	} else {
		start += subreq->consumed;
		avail -= subreq->consumed;
	}
	part = umin(avail, fsize);

	trace_netfs_progress(subreq, start, avail, part);
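
	/* If what we have (including any donations) reaches the end of the
	 * folio, we can consume it.  There are three cases: the folio begins
	 * exactly at our unconsumed data, so we own it and can unlock it;
	 * the folio begins before our data, so its front belongs to the
	 * preceding subrequest and we donate our part of it backwards; or
	 * the folio begins after our data, which shouldn't happen.
	 */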
	if (start + avail >= fend) {
		if (fpos == start) {
			/* Flush, unlock and mark for caching any folio we've just read. */
			subreq->consumed = fend - subreq->start;
			netfs_unlock_read_folio(subreq, rreq, folioq, slot);
			folioq_mark2(folioq, slot);
			if (subreq->consumed >= subreq->len)
				goto remove_subreq;
		} else if (fpos < start) {
			excess = fend - subreq->start;

			spin_lock_bh(&rreq->lock);
			/* If we complete first on a folio split with the
			 * preceding subreq, donate to that subreq - otherwise
			 * we get the responsibility.
			 */
			if (subreq->prev_donated != prev_donated) {
				spin_unlock_bh(&rreq->lock);
				goto donation_changed;
			}

			if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
				spin_unlock_bh(&rreq->lock);
				pr_err("Can't donate prior to front\n");
				goto bad;
			}

			prev = list_prev_entry(subreq, rreq_link);
			WRITE_ONCE(prev->next_donated, prev->next_donated + excess);
			subreq->start += excess;
			subreq->len -= excess;
			subreq->transferred -= excess;
			trace_netfs_donate(rreq, subreq, prev, excess,
					   netfs_trace_donate_tail_to_prev);
			trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);

			if (subreq->consumed >= subreq->len)
				goto remove_subreq_locked;
			spin_unlock_bh(&rreq->lock);
		} else {
			pr_err("fpos > start\n");
			goto bad;
		}

		/* Advance the rolling buffer to the next folio. */
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			slot = 0;
			folioq = folioq->next;
			subreq->curr_folioq = folioq;
		}
		subreq->curr_folioq_slot = slot;
		if (folioq && folioq_folio(folioq, slot))
			subreq->curr_folio_order = folioq->orders[slot];
		if (!was_async)
			cond_resched();
		goto next_folio;
	}

	/* Deal with partial progress. */
	if (subreq->transferred < subreq->len)
		return false;

	/* Donate the remaining downloaded data to one of the neighbouring
	 * subrequests.  Note that we may race with them doing the same thing.
	 */
	spin_lock_bh(&rreq->lock);

	if (subreq->prev_donated != prev_donated ||
	    subreq->next_donated != next_donated) {
		spin_unlock_bh(&rreq->lock);
		cond_resched();
		goto donation_changed;
	}

	/* Deal with the trickiest case: that this subreq is in the middle of a
	 * folio, not touching either edge, but finishes first.  In such a
	 * case, we donate to the previous subreq, if there is one, so that the
	 * donation is only handled when that completes - and remove this
	 * subreq from the list.
	 *
	 * If the previous subreq finished first, we will have acquired their
	 * donation and should be able to unlock folios and/or donate nextwards.
	 */
	if (!subreq->consumed &&
	    !prev_donated &&
	    !list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
		prev = list_prev_entry(subreq, rreq_link);
		WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len);
		trace_netfs_donate(rreq, subreq, prev, subreq->len,
				   netfs_trace_donate_to_prev);
		subreq->start += subreq->len;
		subreq->len = 0;
		subreq->transferred = 0;
		trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev);
		goto remove_subreq_locked;
	}

	/* If we can't donate down the chain, donate up the chain instead. */
	excess = subreq->len - subreq->consumed + next_donated;

	if (!subreq->consumed)
		excess += prev_donated;

	if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
		rreq->prev_donated = excess;
		trace_netfs_donate(rreq, subreq, NULL, excess,
				   netfs_trace_donate_to_deferred_next);
	} else {
		next = list_next_entry(subreq, rreq_link);
		WRITE_ONCE(next->prev_donated, excess);
		trace_netfs_donate(rreq, subreq, next, excess,
				   netfs_trace_donate_to_next);
	}
	trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next);
	subreq->len = subreq->consumed;
	subreq->transferred = subreq->consumed;
	goto remove_subreq_locked;

remove_subreq:
	spin_lock_bh(&rreq->lock);
remove_subreq_locked:
	subreq->consumed = subreq->len;
	list_del(&subreq->rreq_link);
	spin_unlock_bh(&rreq->lock);
	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
	return true;

bad:
	/* Errr... prev and next both donated to us, but insufficient to finish
	 * the folio.
	 */
	printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n",
	       rreq->debug_id, subreq->debug_index,
	       subreq->start, subreq->start + subreq->transferred - 1,
	       subreq->consumed, subreq->transferred, subreq->len);
	printk("folio: %llx-%llx\n", fpos, fend - 1);
	printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated);
	printk("s=%llx av=%zx part=%zx\n", start, avail, part);
	BUG();
}

/*
 * Do page flushing and suchlike after DIO.
 */
static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	unsigned int i;

	/* Collect unbuffered reads and direct reads, adding up the transfer
	 * sizes until we find the first short or failed subrequest.
	 */
	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
		rreq->transferred += subreq->transferred;

		if (subreq->transferred < subreq->len ||
		    test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
			rreq->error = subreq->error;
			break;
		}
	}

	if (rreq->origin == NETFS_DIO_READ) {
		for (i = 0; i < rreq->direct_bv_count; i++) {
			flush_dcache_page(rreq->direct_bv[i].bv_page);
			// TODO: cifs marks pages in the destination buffer
			// dirty under some circumstances after a read.  Do we
			// need to do that too?
			set_page_dirty(rreq->direct_bv[i].bv_page);
		}
	}

	if (rreq->iocb) {
		rreq->iocb->ki_pos += rreq->transferred;
		if (rreq->iocb->ki_complete)
			rreq->iocb->ki_complete(
				rreq->iocb, rreq->error ? rreq->error : rreq->transferred);
	}
	if (rreq->netfs_ops->done)
		rreq->netfs_ops->done(rreq);
	if (rreq->origin == NETFS_DIO_READ)
		inode_dio_end(rreq->inode);
}

/*
 * Assess the state of a read request and decide what to do next.
 *
 * Note that we're in normal kernel thread context at this point, possibly
 * running on a workqueue.
 */
static void netfs_rreq_assess(struct netfs_io_request *rreq)
{
	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);

	//netfs_rreq_is_still_valid(rreq);

	if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) {
		netfs_retry_reads(rreq);
		return;
	}

	if (rreq->origin == NETFS_DIO_READ ||
	    rreq->origin == NETFS_READ_GAPS)
		netfs_rreq_assess_dio(rreq);
	task_io_account_read(rreq->transferred);

	trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip);
	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);

	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
	netfs_clear_subrequests(rreq, false);
	netfs_unlock_abandoned_read_pages(rreq);
	if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)))
		netfs_pgpriv2_write_to_the_cache(rreq);
}
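
/*
 * Deferred assessment of a read request, run from a workqueue when the final
 * I/O operation terminated in a context (e.g. softirq) that couldn't perform
 * the collection directly (see netfs_rreq_terminated() below).
 */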
void netfs_read_termination_worker(struct work_struct *work)
{
	struct netfs_io_request *rreq =
		container_of(work, struct netfs_io_request, work);

	netfs_see_request(rreq, netfs_rreq_trace_see_work);
	netfs_rreq_assess(rreq);
	netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
}

/*
 * Handle the completion of all outstanding I/O operations on a read request.
 * We inherit a ref from the caller.
 */
void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
{
	if (!was_async)
		return netfs_rreq_assess(rreq);
	if (!work_pending(&rreq->work)) {
		netfs_get_request(rreq, netfs_rreq_trace_get_work);
		if (!queue_work(system_unbound_wq, &rreq->work))
			netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
	}
}

/**
 * netfs_read_subreq_progress - Note progress of a read operation.
 * @subreq: The read subrequest on which progress has been made.
 * @was_async: True if we're in an asynchronous context.
 *
 * This tells the read side of the netfs library that a contributory I/O
 * operation has made some progress and that it may be possible to unlock some
 * folios.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
				bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	trace_netfs_sreq(subreq, netfs_sreq_trace_progress);

	if (subreq->transferred > subreq->consumed &&
	    (rreq->origin == NETFS_READAHEAD ||
	     rreq->origin == NETFS_READPAGE ||
	     rreq->origin == NETFS_READ_FOR_WRITE)) {
		netfs_consume_read_data(subreq, was_async);
		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
	}
}
EXPORT_SYMBOL(netfs_read_subreq_progress);
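
/*
 * Illustrative sketch only (not part of the netfs API): a filesystem's
 * receive path might report partial progress roughly like this so that
 * folios covered by the data received so far can be unlocked early.  The
 * helper name example_fs_received() is an assumption made for the example.
 */
static inline void example_fs_received(struct netfs_io_subrequest *subreq,
				       size_t bytes, bool was_async)
{
	/* Account the newly arrived data before notifying the helper, as
	 * required by netfs_read_subreq_progress().
	 */
	subreq->transferred += bytes;
	netfs_read_subreq_progress(subreq, was_async);
}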

/**
 * netfs_read_subreq_terminated - Note the termination of an I/O operation.
 * @subreq: The I/O subrequest that has terminated.
 * @error: Error code indicating type of completion.
 * @was_async: True if the termination was asynchronous.
 *
 * This tells the read helper that a contributory I/O operation has terminated,
 * one way or another, and that it should integrate the results.
 *
 * The caller indicates the outcome of the operation through @error, supplying
 * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY
 * is set) or a negative error code.  The helper will look after reissuing I/O
 * operations as appropriate and writing downloaded data to the cache.
 *
 * Before calling, the filesystem should update subreq->transferred to track
 * the amount of data copied into the output buffer.
 *
 * If @was_async is true, the caller might be running in softirq or interrupt
 * context and we can't sleep.
 */
void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq,
				  int error, bool was_async)
{
	struct netfs_io_request *rreq = subreq->rreq;

	switch (subreq->source) {
	case NETFS_READ_FROM_CACHE:
		netfs_stat(&netfs_n_rh_read_done);
		break;
	case NETFS_DOWNLOAD_FROM_SERVER:
		netfs_stat(&netfs_n_rh_download_done);
		break;
	default:
		break;
	}

	if (rreq->origin != NETFS_DIO_READ) {
		/* Collect buffered reads.
		 *
		 * If the read completed validly short, then we can clear the
		 * tail before going on to unlock the folios.
		 */
		if (error == 0 && subreq->transferred < subreq->len &&
		    (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) ||
		     test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) {
			netfs_clear_unread(subreq);
			subreq->transferred = subreq->len;
			trace_netfs_sreq(subreq, netfs_sreq_trace_clear);
		}
		if (subreq->transferred > subreq->consumed &&
		    (rreq->origin == NETFS_READAHEAD ||
		     rreq->origin == NETFS_READPAGE ||
		     rreq->origin == NETFS_READ_FOR_WRITE)) {
			netfs_consume_read_data(subreq, was_async);
			__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
		}
		rreq->transferred += subreq->transferred;
	}

	/* Deal with retry requests, short reads and errors.  If we retry
	 * but don't make progress, we abandon the attempt.
	 */
	if (!error && subreq->transferred < subreq->len) {
		if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) {
			trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof);
		} else {
			trace_netfs_sreq(subreq, netfs_sreq_trace_short);
			if (subreq->transferred > subreq->consumed) {
				__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
				__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
				set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
			} else if (!__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
				__set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
				set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags);
			} else {
				__set_bit(NETFS_SREQ_FAILED, &subreq->flags);
				error = -ENODATA;
			}
		}
	}

	subreq->error = error;
	trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);

	if (unlikely(error < 0)) {
		trace_netfs_failure(rreq, subreq, error, netfs_fail_read);
		if (subreq->source == NETFS_READ_FROM_CACHE) {
			netfs_stat(&netfs_n_rh_read_failed);
		} else {
			netfs_stat(&netfs_n_rh_download_failed);
			set_bit(NETFS_RREQ_FAILED, &rreq->flags);
			rreq->error = subreq->error;
		}
	}

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		netfs_rreq_terminated(rreq, was_async);

	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
EXPORT_SYMBOL(netfs_read_subreq_terminated);
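
/*
 * Illustrative sketch only (not taken from any particular filesystem): once
 * a filesystem's read against the server or cache completes, it hands the
 * outcome to the helper roughly like this.  The function name
 * example_fs_read_done() and the way the result is obtained are assumptions
 * made for the example.
 */
static inline void example_fs_read_done(struct netfs_io_subrequest *subreq,
					ssize_t result, bool was_async)
{
	int error = 0;

	if (result > 0) {
		/* Record how much actually landed in the output buffer; the
		 * collector uses this to decide which folios can be unlocked
		 * and whether a short transfer should be retried.
		 */
		subreq->transferred += result;
	} else if (result < 0) {
		error = result;
	}

	/* Hand the outcome to netfs for collection, possible retry and
	 * writing of downloaded data to the cache.
	 */
	netfs_read_subreq_terminated(subreq, error, was_async);
}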