// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback.  The keys used to author the writes
 * are cached on the vnode.  wreq->netfs_priv2 will contain the last writeback
 * key record used, or NULL; if it is set, the search resumes from there.
 * wreq->netfs_priv will be set to the key itself, or NULL.
 */
static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
	struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);

	key_put(wreq->netfs_priv);
	wreq->netfs_priv = NULL;
	wreq->netfs_priv2 = NULL;

	spin_lock(&vnode->wb_lock);
	if (old)
		wbk = list_next_entry(old, vnode_link);
	else
		wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);

	list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
		_debug("wbk %u", key_serial(wbk->key));
		if (key_validate(wbk->key) == 0) {
			refcount_inc(&wbk->usage);
			wreq->netfs_priv = key_get(wbk->key);
			wreq->netfs_priv2 = wbk;
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}
	}

	spin_unlock(&vnode->wb_lock);

	afs_put_wb_key(old);
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * Prepare a subrequest to write to the server.  This sets the maximum write
 * size on the stream.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];

	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
	//	subreq->max_len = 512 * 1024;
	//else
	stream->sreq_max_len = 256 * 1024 * 1024;
}

/*
 * Issue a subrequest to write to the server.
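 *
 * This runs from a workqueue item so that waiting for the store operation to
 * complete doesn't stall the caller; the subrequest's own iterator is handed
 * to the operation as the source of the data to write.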
 */
static void afs_issue_write_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
	struct netfs_io_request *wreq = subreq->rreq;
	struct afs_operation *op;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
	unsigned long long pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int ret = -ENOKEY;

	_enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
	       wreq->debug_id, subreq->debug_index,
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       pos, len);

#if 0 // Error injection
	if (subreq->debug_index == 3)
		return netfs_write_subrequest_terminated(subreq, -ENOANO, false);

	if (!subreq->retry_count) {
		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
	}
#endif

	op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
	if (IS_ERR(op))
		return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = len;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

	afs_begin_vnode_operation(op);

	op->store.write_iter = &subreq->io_iter;
	op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
	op->mtime = inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);
	ret = afs_put_operation(op);
	switch (ret) {
	case 0:
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* If there are more keys we can try, use the retry algorithm
		 * to rotate the keys.
		 */
		if (wreq->netfs_priv2)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		break;
	}

	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
}

void afs_issue_write(struct netfs_io_subrequest *subreq)
{
	subreq->work.func = afs_issue_write_worker;
	if (!queue_work(system_unbound_wq, &subreq->work))
		WARN_ON_ONCE(1);
}

/*
 * Writeback calls this when it finds a folio that needs uploading.  This
 * isn't called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
	if (S_ISREG(wreq->inode->i_mode))
		afs_get_writeback_key(wreq);
}

/*
 * Prepare to retry the writes in a request.  Use this to try rotating the
 * available writeback keys.
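 *
 * Reads are never retried this way.  For writes that failed with a
 * key-related error (EACCES, EPERM, ENOKEY, EKEYEXPIRED, EKEYREJECTED or
 * EKEYREVOKED), the next usable key is selected; if no further key is
 * available, the stream is failed.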
 */
void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq =
		list_first_entry(&stream->subrequests,
				 struct netfs_io_subrequest, rreq_link);

	switch (wreq->origin) {
	case NETFS_READAHEAD:
	case NETFS_READPAGE:
	case NETFS_READ_GAPS:
	case NETFS_READ_SINGLE:
	case NETFS_READ_FOR_WRITE:
	case NETFS_DIO_READ:
		return;
	default:
		break;
	}

	switch (subreq->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_get_writeback_key(wreq);
		if (!wreq->netfs_priv)
			stream->failed = true;
		break;
	}
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}

/*
 * Prune the keys cached for writeback.  We take vnode->wb_lock here, so the
 * caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}
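
/*
 * Note: the hooks above are wired up elsewhere in the AFS code (the netfs
 * request ops and the file/address_space ops, e.g. in fs/afs/file.c).  A
 * rough sketch of that wiring, assuming the netfs_request_ops layout in use
 * here, would look something like:
 *
 *	const struct netfs_request_ops afs_req_ops = {
 *		...
 *		.begin_writeback	= afs_begin_writeback,
 *		.prepare_write		= afs_prepare_write,
 *		.issue_write		= afs_issue_write,
 *		.retry_request		= afs_retry_request,
 *	};
 */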