read_pgpriv2.c: diff from ee4cdf7ba857a894ad1650d6ab77669cbbfa329e to c4f1450ecccc5311db87f806998eda1c824c4e35
// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>

--- 83 unchanged lines hidden ---

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

-	cache->submit_max_len = fsize;
+	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests. For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
+		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
-		cache->submit_max_len -= part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);

--- 138 unchanged lines hidden ---
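
The hunk above swaps the running cache->submit_max_len budget (decremented by each part written) for cache->submit_extendable_to, recomputed on every pass as the room left in the folio beyond submit_off. The sketch below is a minimal, self-contained illustration of that bookkeeping loop only, not the kernel code: advance_write(), struct stream, WSIZE and the sizes in main() are hypothetical stand-ins for netfs_advance_write(), the netfs stream/cache state and the transport's write-size limit.

/*
 * Minimal sketch, NOT kernel code: it only mirrors the submit_off/
 * submit_len/submit_extendable_to bookkeeping from the hunk above.
 * advance_write(), struct stream, WSIZE and the sizes in main() are
 * hypothetical stand-ins chosen for illustration.
 */
#include <stdio.h>
#include <sys/types.h>

#define WSIZE 16384	/* pretend per-subrequest cap (wsize / SMB credits) */

struct stream {
	size_t submit_off;		/* offset of next byte to submit */
	size_t submit_len;		/* bytes of real data still to submit */
	size_t submit_extendable_to;	/* room left in the folio past submit_off */
};

/* Stand-in for netfs_advance_write(): issue one subrequest of up to WSIZE. */
static ssize_t advance_write(struct stream *s, size_t start, size_t len)
{
	size_t part = len < WSIZE ? len : WSIZE;

	printf("subreq: start=%zu len=%zu (may extend up to %zu)\n",
	       start, part, s->submit_extendable_to);
	return (ssize_t)part;
}

int main(void)
{
	size_t fpos = 0;	/* file position of the folio */
	size_t flen = 40000;	/* valid data in the folio */
	size_t fsize = 65536;	/* size of the (large) folio */
	struct stream s = { .submit_off = 0, .submit_len = flen };

	/* Carve the folio into one or more subrequests, as the loop above does. */
	do {
		ssize_t part;

		/* New scheme: recompute the extension bound every pass
		 * instead of decrementing a running submit_max_len. */
		s.submit_extendable_to = fsize - s.submit_off;
		part = advance_write(&s, fpos + s.submit_off, s.submit_len);
		s.submit_off += (size_t)part;
		if ((size_t)part > s.submit_len)
			s.submit_len = 0;
		else
			s.submit_len -= (size_t)part;
	} while (s.submit_len > 0);

	return 0;
}

Recomputing submit_extendable_to from fsize - submit_off on each pass keeps the bound correct however far the previous subrequest advanced, which appears to be the point of dropping the decrementing submit_max_len counter.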