// SPDX-License-Identifier: GPL-2.0-or-later
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/netfs.h>
#include <trace/events/netfs.h>
#include "internal.h"

/*
 * completion of write to server
 */
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
	_enter("{%llx:%llu},{%x @%llx}",
	       vnode->fid.vid, vnode->fid.vnode, len, start);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * Find a key to use for the writeback. We cached the keys used to author the
 * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key
 * record used or NULL and we need to start from there if it's set.
 * wreq->netfs_priv will be set to the key itself or NULL.
 */
static void afs_get_writeback_key(struct netfs_io_request *wreq)
{
	struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);

	key_put(wreq->netfs_priv);
	wreq->netfs_priv = NULL;
	wreq->netfs_priv2 = NULL;

	spin_lock(&vnode->wb_lock);
	if (old)
		wbk = list_next_entry(old, vnode_link);
	else
		wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);

	list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
		_debug("wbk %u", key_serial(wbk->key));
		if (key_validate(wbk->key) == 0) {
			refcount_inc(&wbk->usage);
			wreq->netfs_priv = key_get(wbk->key);
			wreq->netfs_priv2 = wbk;
			_debug("USE WB KEY %u", key_serial(wbk->key));
			break;
		}
	}

	spin_unlock(&vnode->wb_lock);

	afs_put_wb_key(old);
}

static void afs_store_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	op->ctime = op->file[0].scb.status.mtime_client;
	afs_vnode_commit_status(op, &op->file[0]);
	if (!afs_op_error(op)) {
		afs_pages_written_back(vnode, op->store.pos, op->store.size);
		afs_stat_v(vnode, n_stores);
		atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
	}
}

static const struct afs_operation_ops afs_store_data_operation = {
	.issue_afs_rpc	= afs_fs_store_data,
	.issue_yfs_rpc	= yfs_fs_store_data,
	.success	= afs_store_data_success,
};

/*
 * Prepare a subrequest to write to the server. This sets the max_len
 * parameter.
 */
void afs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_stream *stream = &subreq->rreq->io_streams[subreq->stream_nr];

	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
	//	subreq->max_len = 512 * 1024;
	//else
	stream->sreq_max_len = 256 * 1024 * 1024;
}

/*
 * Issue a subrequest to write to the server.
 */
static void afs_issue_write_worker(struct work_struct *work)
{
	struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
	struct netfs_io_request *wreq = subreq->rreq;
	struct afs_operation *op;
	struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
	unsigned long long pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	int ret = -ENOKEY;

	_enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
	       wreq->debug_id, subreq->debug_index,
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       pos, len);

#if 0 // Error injection
	if (subreq->debug_index == 3)
		return netfs_write_subrequest_terminated(subreq, -ENOANO);

	if (!subreq->retry_count) {
		set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		return netfs_write_subrequest_terminated(subreq, -EAGAIN);
	}
#endif

	op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
	if (IS_ERR(op))
		return netfs_write_subrequest_terminated(subreq, -EAGAIN);

	afs_op_set_vnode(op, 0, vnode);
	op->file[0].dv_delta = 1;
	op->file[0].modification = true;
	op->store.pos = pos;
	op->store.size = len;
	op->flags |= AFS_OPERATION_UNINTR;
	op->ops = &afs_store_data_operation;

	afs_begin_vnode_operation(op);

	op->store.write_iter = &subreq->io_iter;
	op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
	op->mtime = inode_get_mtime(&vnode->netfs.inode);

	afs_wait_for_operation(op);
	ret = afs_put_operation(op);
	switch (ret) {
	case 0:
		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* If there are more keys we can try, use the retry algorithm
		 * to rotate the keys.
		 */
		if (wreq->netfs_priv2)
			set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
		break;
	}

	netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len);
}

void afs_issue_write(struct netfs_io_subrequest *subreq)
{
	subreq->work.func = afs_issue_write_worker;
	if (!queue_work(system_dfl_wq, &subreq->work))
		WARN_ON_ONCE(1);
}

/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
void afs_begin_writeback(struct netfs_io_request *wreq)
{
	if (S_ISREG(wreq->inode->i_mode))
		afs_get_writeback_key(wreq);
}

/*
 * Prepare to retry the writes in request. Use this to try rotating the
 * available writeback keys.
 */
void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
{
	struct netfs_io_subrequest *subreq =
		list_first_entry(&stream->subrequests,
				 struct netfs_io_subrequest, rreq_link);

	switch (wreq->origin) {
	case NETFS_READAHEAD:
	case NETFS_READPAGE:
	case NETFS_READ_GAPS:
	case NETFS_READ_SINGLE:
	case NETFS_READ_FOR_WRITE:
	case NETFS_UNBUFFERED_READ:
	case NETFS_DIO_READ:
		return;
	default:
		break;
	}

	switch (subreq->error) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_get_writeback_key(wreq);
		if (!wreq->netfs_priv)
			stream->failed = true;
		break;
	}
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	int ret;

	/* We have to be careful as we can end up racing with setattr()
	 * truncating the pagecache since the caller doesn't take a lock here
	 * to prevent it.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL)
		down_read(&vnode->validate_lock);
	else if (!down_read_trylock(&vnode->validate_lock))
		return 0;

	ret = netfs_writepages(mapping, wbc);
	up_read(&vnode->validate_lock);
	return ret;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct afs_file *af = file->private_data;
	int ret;

	_enter("{%llx:%llu},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return file_write_and_wait_range(file, start, end);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;

	if (afs_validate(AFS_FS_I(file_inode(file)), afs_file_key(file)) < 0)
		return VM_FAULT_SIGBUS;
	return netfs_page_mkwrite(vmf, NULL);
}

/*
 * Prune the keys cached for writeback. This takes vnode->wb_lock itself, so
 * the caller must not hold it.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}