1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/fs/nfs/inode.c
4 *
5 * Copyright (C) 1992 Rick Sladkey
6 *
7 * nfs inode and superblock handling functions
8 *
9 * Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
10 * experimental NFS changes. Modularisation taken straight from SYS5 fs.
11 *
12 * Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
13 * J.S.Peatfield@damtp.cam.ac.uk
14 *
15 */
16
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/sched/signal.h>
20 #include <linux/time.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/stat.h>
25 #include <linux/errno.h>
26 #include <linux/unistd.h>
27 #include <linux/sunrpc/clnt.h>
28 #include <linux/sunrpc/stats.h>
29 #include <linux/sunrpc/metrics.h>
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_mount.h>
32 #include <linux/nfs4_mount.h>
33 #include <linux/lockd/bind.h>
34 #include <linux/seq_file.h>
35 #include <linux/mount.h>
36 #include <linux/vfs.h>
37 #include <linux/inet.h>
38 #include <linux/nfs_xdr.h>
39 #include <linux/slab.h>
40 #include <linux/compat.h>
41 #include <linux/freezer.h>
42 #include <linux/uaccess.h>
43 #include <linux/iversion.h>
44
45 #include "nfs4_fs.h"
46 #include "callback.h"
47 #include "delegation.h"
48 #include "iostat.h"
49 #include "internal.h"
50 #include "fscache.h"
51 #include "pnfs.h"
52 #include "nfs.h"
53 #include "netns.h"
54 #include "sysfs.h"
55
56 #include "nfstrace.h"
57
58 #define NFSDBG_FACILITY NFSDBG_VFS
59
60 #define NFS_64_BIT_INODE_NUMBERS_ENABLED 1
61
62 /* Default is to see 64-bit inode numbers */
63 static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
64
65 static int nfs_update_inode(struct inode *, struct nfs_fattr *);
66
67 static struct kmem_cache * nfs_inode_cachep;
68
69 static inline unsigned long
nfs_fattr_to_ino_t(struct nfs_fattr * fattr)70 nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
71 {
72 return nfs_fileid_to_ino_t(fattr->fileid);
73 }
74
nfs_wait_bit_killable(struct wait_bit_key * key,int mode)75 int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
76 {
77 if (unlikely(nfs_current_task_exiting()))
78 return -EINTR;
79 schedule();
80 if (signal_pending_state(mode, current))
81 return -ERESTARTSYS;
82 return 0;
83 }
84 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
85
86 /**
87 * nfs_compat_user_ino64 - returns the user-visible inode number
88 * @fileid: 64-bit fileid
89 *
90 * This function returns a 32-bit inode number if the boot parameter
91 * nfs.enable_ino64 is zero.
92 */
nfs_compat_user_ino64(u64 fileid)93 u64 nfs_compat_user_ino64(u64 fileid)
94 {
95 #ifdef CONFIG_COMPAT
96 compat_ulong_t ino;
97 #else
98 unsigned long ino;
99 #endif
100
101 if (enable_ino64)
102 return fileid;
103 ino = fileid;
104 if (sizeof(ino) < sizeof(fileid))
105 ino ^= fileid >> (sizeof(fileid)-sizeof(ino)) * 8;
106 return ino;
107 }
108
nfs_drop_inode(struct inode * inode)109 int nfs_drop_inode(struct inode *inode)
110 {
111 return NFS_STALE(inode) || generic_drop_inode(inode);
112 }
113 EXPORT_SYMBOL_GPL(nfs_drop_inode);
114
nfs_clear_inode(struct inode * inode)115 void nfs_clear_inode(struct inode *inode)
116 {
117 /*
118 * The following should never happen...
119 */
120 WARN_ON_ONCE(nfs_have_writebacks(inode));
121 WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
122 nfs_zap_acl_cache(inode);
123 nfs_access_zap_cache(inode);
124 nfs_fscache_clear_inode(inode);
125 }
126 EXPORT_SYMBOL_GPL(nfs_clear_inode);
127
nfs_evict_inode(struct inode * inode)128 void nfs_evict_inode(struct inode *inode)
129 {
130 truncate_inode_pages_final(&inode->i_data);
131 clear_inode(inode);
132 nfs_clear_inode(inode);
133 }
134
nfs_sync_inode(struct inode * inode)135 int nfs_sync_inode(struct inode *inode)
136 {
137 inode_dio_wait(inode);
138 return nfs_wb_all(inode);
139 }
140 EXPORT_SYMBOL_GPL(nfs_sync_inode);
141
142 /**
143 * nfs_sync_mapping - helper to flush all mmapped dirty data to disk
144 * @mapping: pointer to struct address_space
145 */
nfs_sync_mapping(struct address_space * mapping)146 int nfs_sync_mapping(struct address_space *mapping)
147 {
148 int ret = 0;
149
150 if (mapping->nrpages != 0) {
151 unmap_mapping_range(mapping, 0, 0, 0);
152 ret = nfs_wb_all(mapping->host);
153 }
154 return ret;
155 }
156
nfs_attribute_timeout(struct inode * inode)157 static int nfs_attribute_timeout(struct inode *inode)
158 {
159 struct nfs_inode *nfsi = NFS_I(inode);
160
161 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
162 }
163
nfs_check_cache_flags_invalid(struct inode * inode,unsigned long flags)164 static bool nfs_check_cache_flags_invalid(struct inode *inode,
165 unsigned long flags)
166 {
167 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
168
169 return (cache_validity & flags) != 0;
170 }
171
nfs_check_cache_invalid(struct inode * inode,unsigned long flags)172 bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
173 {
174 if (nfs_check_cache_flags_invalid(inode, flags))
175 return true;
176 return nfs_attribute_cache_expired(inode);
177 }
178 EXPORT_SYMBOL_GPL(nfs_check_cache_invalid);
179
180 #ifdef CONFIG_NFS_V4_2
nfs_has_xattr_cache(const struct nfs_inode * nfsi)181 static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
182 {
183 return nfsi->xattr_cache != NULL;
184 }
185 #else
nfs_has_xattr_cache(const struct nfs_inode * nfsi)186 static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
187 {
188 return false;
189 }
190 #endif
191
nfs_set_cache_invalid(struct inode * inode,unsigned long flags)192 void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
193 {
194 struct nfs_inode *nfsi = NFS_I(inode);
195
196 if (nfs_have_delegated_attributes(inode)) {
197 if (!(flags & NFS_INO_REVAL_FORCED))
198 flags &= ~(NFS_INO_INVALID_MODE |
199 NFS_INO_INVALID_OTHER |
200 NFS_INO_INVALID_XATTR);
201 flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
202 }
203
204 if (!nfs_has_xattr_cache(nfsi))
205 flags &= ~NFS_INO_INVALID_XATTR;
206 if (flags & NFS_INO_INVALID_DATA)
207 nfs_fscache_invalidate(inode, 0);
208 flags &= ~NFS_INO_REVAL_FORCED;
209
210 flags |= nfsi->cache_validity;
211 if (inode->i_mapping->nrpages == 0)
212 flags &= ~NFS_INO_INVALID_DATA;
213
214 /* pairs with nfs_clear_invalid_mapping()'s smp_load_acquire() */
215 smp_store_release(&nfsi->cache_validity, flags);
216
217 if (inode->i_mapping->nrpages == 0 ||
218 nfsi->cache_validity & NFS_INO_INVALID_DATA) {
219 nfs_ooo_clear(nfsi);
220 }
221 trace_nfs_set_cache_invalid(inode, 0);
222 }
223 EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
224
225 /*
226 * Invalidate the local caches
227 */
nfs_zap_caches_locked(struct inode * inode)228 static void nfs_zap_caches_locked(struct inode *inode)
229 {
230 struct nfs_inode *nfsi = NFS_I(inode);
231 int mode = inode->i_mode;
232
233 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
234
235 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
236 nfsi->attrtimeo_timestamp = jiffies;
237
238 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
239 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
240 NFS_INO_INVALID_DATA |
241 NFS_INO_INVALID_ACCESS |
242 NFS_INO_INVALID_ACL |
243 NFS_INO_INVALID_XATTR);
244 else
245 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
246 NFS_INO_INVALID_ACCESS |
247 NFS_INO_INVALID_ACL |
248 NFS_INO_INVALID_XATTR);
249 nfs_zap_label_cache_locked(nfsi);
250 }
251
nfs_zap_caches(struct inode * inode)252 void nfs_zap_caches(struct inode *inode)
253 {
254 spin_lock(&inode->i_lock);
255 nfs_zap_caches_locked(inode);
256 spin_unlock(&inode->i_lock);
257 }
258
nfs_zap_mapping(struct inode * inode,struct address_space * mapping)259 void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
260 {
261 if (mapping->nrpages != 0) {
262 spin_lock(&inode->i_lock);
263 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
264 spin_unlock(&inode->i_lock);
265 }
266 }
267
nfs_zap_acl_cache(struct inode * inode)268 void nfs_zap_acl_cache(struct inode *inode)
269 {
270 void (*clear_acl_cache)(struct inode *);
271
272 clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache;
273 if (clear_acl_cache != NULL)
274 clear_acl_cache(inode);
275 spin_lock(&inode->i_lock);
276 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
277 spin_unlock(&inode->i_lock);
278 }
279 EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
280
nfs_invalidate_atime(struct inode * inode)281 void nfs_invalidate_atime(struct inode *inode)
282 {
283 if (nfs_have_delegated_atime(inode))
284 return;
285 spin_lock(&inode->i_lock);
286 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
287 spin_unlock(&inode->i_lock);
288 }
289 EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
290
291 /*
292 * Invalidate, but do not unhash, the inode.
293 * NB: must be called with inode->i_lock held!
294 */
nfs_set_inode_stale_locked(struct inode * inode)295 static void nfs_set_inode_stale_locked(struct inode *inode)
296 {
297 set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
298 nfs_zap_caches_locked(inode);
299 trace_nfs_set_inode_stale(inode);
300 }
301
nfs_set_inode_stale(struct inode * inode)302 void nfs_set_inode_stale(struct inode *inode)
303 {
304 spin_lock(&inode->i_lock);
305 nfs_set_inode_stale_locked(inode);
306 spin_unlock(&inode->i_lock);
307 }
308
309 struct nfs_find_desc {
310 struct nfs_fh *fh;
311 struct nfs_fattr *fattr;
312 };
313
314 /*
315 * In NFSv3 we can have 64bit inode numbers. In order to support
316 * this, and re-exported directories (also seen in NFSv2)
317 * we are forced to allow 2 different inodes to have the same
318 * i_ino.
319 */
320 static int
nfs_find_actor(struct inode * inode,void * opaque)321 nfs_find_actor(struct inode *inode, void *opaque)
322 {
323 struct nfs_find_desc *desc = opaque;
324 struct nfs_fh *fh = desc->fh;
325 struct nfs_fattr *fattr = desc->fattr;
326
327 if (NFS_FILEID(inode) != fattr->fileid)
328 return 0;
329 if (inode_wrong_type(inode, fattr->mode))
330 return 0;
331 if (nfs_compare_fh(NFS_FH(inode), fh))
332 return 0;
333 if (is_bad_inode(inode) || NFS_STALE(inode))
334 return 0;
335 return 1;
336 }
337
338 static int
nfs_init_locked(struct inode * inode,void * opaque)339 nfs_init_locked(struct inode *inode, void *opaque)
340 {
341 struct nfs_find_desc *desc = opaque;
342 struct nfs_fattr *fattr = desc->fattr;
343
344 set_nfs_fileid(inode, fattr->fileid);
345 inode->i_mode = fattr->mode;
346 nfs_copy_fh(NFS_FH(inode), desc->fh);
347 return 0;
348 }
349
350 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
nfs_clear_label_invalid(struct inode * inode)351 static void nfs_clear_label_invalid(struct inode *inode)
352 {
353 spin_lock(&inode->i_lock);
354 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
355 spin_unlock(&inode->i_lock);
356 }
357
nfs_setsecurity(struct inode * inode,struct nfs_fattr * fattr)358 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr)
359 {
360 int error;
361
362 if (fattr->label == NULL)
363 return;
364
365 if ((fattr->valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL) && inode->i_security) {
366 error = security_inode_notifysecctx(inode, fattr->label->label,
367 fattr->label->len);
368 if (error)
369 printk(KERN_ERR "%s() %s %d "
370 "security_inode_notifysecctx() %d\n",
371 __func__,
372 (char *)fattr->label->label,
373 fattr->label->len, error);
374 nfs_clear_label_invalid(inode);
375 }
376 }
377
nfs4_label_alloc(struct nfs_server * server,gfp_t flags)378 struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
379 {
380 struct nfs4_label *label;
381
382 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
383 return NULL;
384
385 label = kzalloc(sizeof(struct nfs4_label), flags);
386 if (label == NULL)
387 return ERR_PTR(-ENOMEM);
388
389 label->label = kzalloc(NFS4_MAXLABELLEN, flags);
390 if (label->label == NULL) {
391 kfree(label);
392 return ERR_PTR(-ENOMEM);
393 }
394 label->len = NFS4_MAXLABELLEN;
395
396 return label;
397 }
398 EXPORT_SYMBOL_GPL(nfs4_label_alloc);
399 #else
nfs_setsecurity(struct inode * inode,struct nfs_fattr * fattr)400 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr)
401 {
402 }
403 #endif
404 EXPORT_SYMBOL_GPL(nfs_setsecurity);
405
406 /* Search for inode identified by fh, fileid and i_mode in inode cache. */
407 struct inode *
nfs_ilookup(struct super_block * sb,struct nfs_fattr * fattr,struct nfs_fh * fh)408 nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
409 {
410 struct nfs_find_desc desc = {
411 .fh = fh,
412 .fattr = fattr,
413 };
414 struct inode *inode;
415 unsigned long hash;
416
417 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID) ||
418 !(fattr->valid & NFS_ATTR_FATTR_TYPE))
419 return NULL;
420
421 hash = nfs_fattr_to_ino_t(fattr);
422 inode = ilookup5(sb, hash, nfs_find_actor, &desc);
423
424 dprintk("%s: returning %p\n", __func__, inode);
425 return inode;
426 }
427
nfs_inode_init_regular(struct nfs_inode * nfsi)428 static void nfs_inode_init_regular(struct nfs_inode *nfsi)
429 {
430 atomic_long_set(&nfsi->nrequests, 0);
431 atomic_long_set(&nfsi->redirtied_pages, 0);
432 INIT_LIST_HEAD(&nfsi->commit_info.list);
433 atomic_long_set(&nfsi->commit_info.ncommit, 0);
434 atomic_set(&nfsi->commit_info.rpcs_out, 0);
435 mutex_init(&nfsi->commit_mutex);
436 }
437
nfs_inode_init_dir(struct nfs_inode * nfsi)438 static void nfs_inode_init_dir(struct nfs_inode *nfsi)
439 {
440 nfsi->cache_change_attribute = 0;
441 memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
442 init_rwsem(&nfsi->rmdir_sem);
443 }
444
445 /*
446 * This is our front-end to iget that looks up inodes by file handle
447 * instead of inode number.
448 */
449 struct inode *
nfs_fhget(struct super_block * sb,struct nfs_fh * fh,struct nfs_fattr * fattr)450 nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
451 {
452 struct nfs_find_desc desc = {
453 .fh = fh,
454 .fattr = fattr
455 };
456 struct inode *inode = ERR_PTR(-ENOENT);
457 u64 fattr_supported = NFS_SB(sb)->fattr_valid;
458 unsigned long hash;
459
460 nfs_attr_check_mountpoint(sb, fattr);
461
462 if (nfs_attr_use_mounted_on_fileid(fattr))
463 fattr->fileid = fattr->mounted_on_fileid;
464 else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
465 goto out_no_inode;
466 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
467 goto out_no_inode;
468
469 hash = nfs_fattr_to_ino_t(fattr);
470
471 inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc);
472 if (inode == NULL) {
473 inode = ERR_PTR(-ENOMEM);
474 goto out_no_inode;
475 }
476
477 if (inode->i_state & I_NEW) {
478 struct nfs_inode *nfsi = NFS_I(inode);
479 unsigned long now = jiffies;
480
481 /* We set i_ino for the few things that still rely on it,
482 * such as stat(2) */
483 inode->i_ino = hash;
484
485 /* We can't support update_atime(), since the server will reset it */
486 inode->i_flags |= S_NOATIME|S_NOCMTIME;
487 inode->i_mode = fattr->mode;
488 nfsi->cache_validity = 0;
489 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
490 && (fattr_supported & NFS_ATTR_FATTR_MODE))
491 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
492 /* Why so? Because we want revalidate for devices/FIFOs, and
493 * that's precisely what we have in nfs_file_inode_operations.
494 */
495 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
496 if (S_ISREG(inode->i_mode)) {
497 inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
498 inode->i_data.a_ops = &nfs_file_aops;
499 nfs_inode_init_regular(nfsi);
500 mapping_set_large_folios(inode->i_mapping);
501 } else if (S_ISDIR(inode->i_mode)) {
502 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
503 inode->i_fop = &nfs_dir_operations;
504 inode->i_data.a_ops = &nfs_dir_aops;
505 nfs_inode_init_dir(nfsi);
506 /* Deal with crossing mountpoints */
507 if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
508 fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
509 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
510 inode->i_op = &nfs_referral_inode_operations;
511 else
512 inode->i_op = &nfs_mountpoint_inode_operations;
513 inode->i_fop = NULL;
514 inode->i_flags |= S_AUTOMOUNT;
515 }
516 } else if (S_ISLNK(inode->i_mode)) {
517 inode->i_op = &nfs_symlink_inode_operations;
518 inode_nohighmem(inode);
519 } else
520 init_special_inode(inode, inode->i_mode, fattr->rdev);
521
522 inode_set_atime(inode, 0, 0);
523 inode_set_mtime(inode, 0, 0);
524 inode_set_ctime(inode, 0, 0);
525 inode_set_iversion_raw(inode, 0);
526 inode->i_size = 0;
527 clear_nlink(inode);
528 inode->i_uid = make_kuid(&init_user_ns, -2);
529 inode->i_gid = make_kgid(&init_user_ns, -2);
530 inode->i_blocks = 0;
531 nfsi->write_io = 0;
532 nfsi->read_io = 0;
533
534 nfsi->read_cache_jiffies = fattr->time_start;
535 nfsi->attr_gencount = fattr->gencount;
536 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
537 inode_set_atime_to_ts(inode, fattr->atime);
538 else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
539 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
540 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
541 inode_set_mtime_to_ts(inode, fattr->mtime);
542 else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
543 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
544 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
545 inode_set_ctime_to_ts(inode, fattr->ctime);
546 else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
547 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
548 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
549 inode_set_iversion_raw(inode, fattr->change_attr);
550 else
551 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE);
552 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
553 inode->i_size = nfs_size_to_loff_t(fattr->size);
554 else
555 nfs_set_cache_invalid(inode, NFS_INO_INVALID_SIZE);
556 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
557 set_nlink(inode, fattr->nlink);
558 else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
559 nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
560 else
561 set_nlink(inode, 1);
562 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
563 inode->i_uid = fattr->uid;
564 else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
565 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
566 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
567 inode->i_gid = fattr->gid;
568 else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
569 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
570 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
571 inode->i_blocks = fattr->du.nfs2.blocks;
572 else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED &&
573 fattr->size != 0)
574 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
575 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
576 /*
577 * report the blocks in 512byte units
578 */
579 inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
580 } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED &&
581 fattr->size != 0)
582 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
583
584 nfs_setsecurity(inode, fattr);
585
586 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
587 nfsi->attrtimeo_timestamp = now;
588 nfsi->access_cache = RB_ROOT;
589
590 nfs_fscache_init_inode(inode);
591
592 unlock_new_inode(inode);
593 } else {
594 int err = nfs_refresh_inode(inode, fattr);
595 if (err < 0) {
596 iput(inode);
597 inode = ERR_PTR(err);
598 goto out_no_inode;
599 }
600 }
601 dprintk("NFS: nfs_fhget(%s/%Lu fh_crc=0x%08x ct=%d)\n",
602 inode->i_sb->s_id,
603 (unsigned long long)NFS_FILEID(inode),
604 nfs_display_fhandle_hash(fh),
605 atomic_read(&inode->i_count));
606
607 out:
608 return inode;
609
610 out_no_inode:
611 dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
612 goto out;
613 }
614 EXPORT_SYMBOL_GPL(nfs_fhget);
615
616 static void
nfs_fattr_fixup_delegated(struct inode * inode,struct nfs_fattr * fattr)617 nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
618 {
619 unsigned long cache_validity = NFS_I(inode)->cache_validity;
620
621 if (nfs_have_delegated_mtime(inode)) {
622 if (!(cache_validity & NFS_INO_INVALID_CTIME))
623 fattr->valid &= ~(NFS_ATTR_FATTR_PRECTIME |
624 NFS_ATTR_FATTR_CTIME);
625
626 if (!(cache_validity & NFS_INO_INVALID_MTIME))
627 fattr->valid &= ~(NFS_ATTR_FATTR_PREMTIME |
628 NFS_ATTR_FATTR_MTIME);
629
630 if (!(cache_validity & NFS_INO_INVALID_ATIME))
631 fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
632 } else if (nfs_have_delegated_atime(inode)) {
633 if (!(cache_validity & NFS_INO_INVALID_ATIME))
634 fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
635 }
636 }
637
nfs_set_timestamps_to_ts(struct inode * inode,struct iattr * attr)638 static void nfs_set_timestamps_to_ts(struct inode *inode, struct iattr *attr)
639 {
640 unsigned int cache_flags = 0;
641
642 if (attr->ia_valid & ATTR_MTIME_SET) {
643 struct timespec64 ctime = inode_get_ctime(inode);
644 struct timespec64 mtime = inode_get_mtime(inode);
645 struct timespec64 now;
646 int updated = 0;
647
648 now = inode_set_ctime_current(inode);
649 if (!timespec64_equal(&now, &ctime))
650 updated |= S_CTIME;
651
652 inode_set_mtime_to_ts(inode, attr->ia_mtime);
653 if (!timespec64_equal(&now, &mtime))
654 updated |= S_MTIME;
655
656 inode_maybe_inc_iversion(inode, updated);
657 cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
658 }
659 if (attr->ia_valid & ATTR_ATIME_SET) {
660 inode_set_atime_to_ts(inode, attr->ia_atime);
661 cache_flags |= NFS_INO_INVALID_ATIME;
662 }
663 NFS_I(inode)->cache_validity &= ~cache_flags;
664 }
665
nfs_update_timestamps(struct inode * inode,unsigned int ia_valid)666 static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
667 {
668 enum file_time_flags time_flags = 0;
669 unsigned int cache_flags = 0;
670
671 if (ia_valid & ATTR_MTIME) {
672 time_flags |= S_MTIME | S_CTIME;
673 cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
674 }
675 if (ia_valid & ATTR_ATIME) {
676 time_flags |= S_ATIME;
677 cache_flags |= NFS_INO_INVALID_ATIME;
678 }
679 inode_update_timestamps(inode, time_flags);
680 NFS_I(inode)->cache_validity &= ~cache_flags;
681 }
682
nfs_update_delegated_atime(struct inode * inode)683 void nfs_update_delegated_atime(struct inode *inode)
684 {
685 spin_lock(&inode->i_lock);
686 if (nfs_have_delegated_atime(inode))
687 nfs_update_timestamps(inode, ATTR_ATIME);
688 spin_unlock(&inode->i_lock);
689 }
690
nfs_update_delegated_mtime_locked(struct inode * inode)691 void nfs_update_delegated_mtime_locked(struct inode *inode)
692 {
693 if (nfs_have_delegated_mtime(inode))
694 nfs_update_timestamps(inode, ATTR_MTIME);
695 }
696
nfs_update_delegated_mtime(struct inode * inode)697 void nfs_update_delegated_mtime(struct inode *inode)
698 {
699 spin_lock(&inode->i_lock);
700 nfs_update_delegated_mtime_locked(inode);
701 spin_unlock(&inode->i_lock);
702 }
703 EXPORT_SYMBOL_GPL(nfs_update_delegated_mtime);
704
705 #define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
706
707 int
nfs_setattr(struct mnt_idmap * idmap,struct dentry * dentry,struct iattr * attr)708 nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
709 struct iattr *attr)
710 {
711 struct inode *inode = d_inode(dentry);
712 struct nfs_fattr *fattr;
713 int error = 0;
714
715 nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
716
717 /* skip mode change if it's just for clearing setuid/setgid */
718 if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
719 attr->ia_valid &= ~ATTR_MODE;
720
721 if (attr->ia_valid & ATTR_SIZE) {
722 BUG_ON(!S_ISREG(inode->i_mode));
723
724 error = inode_newsize_ok(inode, attr->ia_size);
725 if (error)
726 return error;
727
728 if (attr->ia_size == i_size_read(inode))
729 attr->ia_valid &= ~ATTR_SIZE;
730 }
731
732 if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
733 spin_lock(&inode->i_lock);
734 if (attr->ia_valid & ATTR_MTIME_SET) {
735 nfs_set_timestamps_to_ts(inode, attr);
736 attr->ia_valid &= ~(ATTR_MTIME|ATTR_MTIME_SET|
737 ATTR_ATIME|ATTR_ATIME_SET);
738 } else {
739 nfs_update_timestamps(inode, attr->ia_valid);
740 attr->ia_valid &= ~(ATTR_MTIME|ATTR_ATIME);
741 }
742 spin_unlock(&inode->i_lock);
743 } else if (nfs_have_delegated_atime(inode) &&
744 attr->ia_valid & ATTR_ATIME &&
745 !(attr->ia_valid & ATTR_MTIME)) {
746 if (attr->ia_valid & ATTR_ATIME_SET) {
747 spin_lock(&inode->i_lock);
748 nfs_set_timestamps_to_ts(inode, attr);
749 spin_unlock(&inode->i_lock);
750 attr->ia_valid &= ~(ATTR_ATIME|ATTR_ATIME_SET);
751 } else {
752 nfs_update_delegated_atime(inode);
753 attr->ia_valid &= ~ATTR_ATIME;
754 }
755 }
756
757 /* Optimization: if the end result is no change, don't RPC */
758 if (((attr->ia_valid & NFS_VALID_ATTRS) & ~(ATTR_FILE|ATTR_OPEN)) == 0)
759 return 0;
760
761 trace_nfs_setattr_enter(inode);
762
763 /* Write all dirty data */
764 if (S_ISREG(inode->i_mode))
765 nfs_sync_inode(inode);
766
767 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
768 if (fattr == NULL) {
769 error = -ENOMEM;
770 goto out;
771 }
772
773 error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
774 if (error == 0)
775 error = nfs_refresh_inode(inode, fattr);
776 nfs_free_fattr(fattr);
777 out:
778 trace_nfs_setattr_exit(inode, error);
779 return error;
780 }
781 EXPORT_SYMBOL_GPL(nfs_setattr);
782
783 /**
784 * nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
785 * @inode: inode of the file used
786 * @offset: file offset to start truncating
787 *
788 * This is a copy of the common vmtruncate, but with the locking
789 * corrected to take into account the fact that NFS requires
790 * inode->i_size to be updated under the inode->i_lock.
791 * Note: must be called with inode->i_lock held!
792 */
nfs_vmtruncate(struct inode * inode,loff_t offset)793 static int nfs_vmtruncate(struct inode * inode, loff_t offset)
794 {
795 int err;
796
797 err = inode_newsize_ok(inode, offset);
798 if (err)
799 goto out;
800
801 trace_nfs_size_truncate(inode, offset);
802 i_size_write(inode, offset);
803 /* Optimisation */
804 if (offset == 0) {
805 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
806 nfs_ooo_clear(NFS_I(inode));
807 }
808 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
809
810 spin_unlock(&inode->i_lock);
811 truncate_pagecache(inode, offset);
812 nfs_update_delegated_mtime_locked(inode);
813 spin_lock(&inode->i_lock);
814 out:
815 return err;
816 }
817
818 /**
819 * nfs_setattr_update_inode - Update inode metadata after a setattr call.
820 * @inode: pointer to struct inode
821 * @attr: pointer to struct iattr
822 * @fattr: pointer to struct nfs_fattr
823 *
824 * Note: we do this in the *proc.c in order to ensure that
825 * it works for things like exclusive creates too.
826 */
nfs_setattr_update_inode(struct inode * inode,struct iattr * attr,struct nfs_fattr * fattr)827 void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
828 struct nfs_fattr *fattr)
829 {
830 /* Barrier: bump the attribute generation count. */
831 nfs_fattr_set_barrier(fattr);
832
833 spin_lock(&inode->i_lock);
834 NFS_I(inode)->attr_gencount = fattr->gencount;
835 if ((attr->ia_valid & ATTR_SIZE) != 0) {
836 if (!nfs_have_delegated_mtime(inode))
837 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
838 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
839 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
840 nfs_vmtruncate(inode, attr->ia_size);
841 }
842 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
843 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_CTIME;
844 if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
845 inode->i_mode & S_ISUID)
846 inode->i_mode &= ~S_ISUID;
847 if (setattr_should_drop_sgid(&nop_mnt_idmap, inode))
848 inode->i_mode &= ~S_ISGID;
849 if ((attr->ia_valid & ATTR_MODE) != 0) {
850 int mode = attr->ia_mode & S_IALLUGO;
851 mode |= inode->i_mode & ~S_IALLUGO;
852 inode->i_mode = mode;
853 }
854 if ((attr->ia_valid & ATTR_UID) != 0)
855 inode->i_uid = attr->ia_uid;
856 if ((attr->ia_valid & ATTR_GID) != 0)
857 inode->i_gid = attr->ia_gid;
858 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
859 inode_set_ctime_to_ts(inode, fattr->ctime);
860 else
861 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
862 | NFS_INO_INVALID_CTIME);
863 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
864 | NFS_INO_INVALID_ACL);
865 }
866 if (attr->ia_valid & (ATTR_ATIME_SET|ATTR_ATIME)) {
867 NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_ATIME
868 | NFS_INO_INVALID_CTIME);
869 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
870 inode_set_atime_to_ts(inode, fattr->atime);
871 else if (attr->ia_valid & ATTR_ATIME_SET)
872 inode_set_atime_to_ts(inode, attr->ia_atime);
873 else
874 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
875
876 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
877 inode_set_ctime_to_ts(inode, fattr->ctime);
878 else
879 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
880 | NFS_INO_INVALID_CTIME);
881 }
882 if (attr->ia_valid & (ATTR_MTIME_SET|ATTR_MTIME)) {
883 NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_MTIME
884 | NFS_INO_INVALID_CTIME);
885 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
886 inode_set_mtime_to_ts(inode, fattr->mtime);
887 else if (attr->ia_valid & ATTR_MTIME_SET)
888 inode_set_mtime_to_ts(inode, attr->ia_mtime);
889 else
890 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
891
892 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
893 inode_set_ctime_to_ts(inode, fattr->ctime);
894 else
895 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
896 | NFS_INO_INVALID_CTIME);
897 }
898 if (fattr->valid)
899 nfs_update_inode(inode, fattr);
900 spin_unlock(&inode->i_lock);
901 }
902 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
903
904 /*
905 * Don't request help from readdirplus if the file is being written to,
906 * or if attribute caching is turned off
907 */
nfs_getattr_readdirplus_enable(const struct inode * inode)908 static bool nfs_getattr_readdirplus_enable(const struct inode *inode)
909 {
910 return nfs_server_capable(inode, NFS_CAP_READDIRPLUS) &&
911 !nfs_have_writebacks(inode) && NFS_MAXATTRTIMEO(inode) > 5 * HZ;
912 }
913
nfs_readdirplus_parent_cache_miss(struct dentry * dentry)914 static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
915 {
916 if (!IS_ROOT(dentry)) {
917 struct dentry *parent = dget_parent(dentry);
918 nfs_readdir_record_entry_cache_miss(d_inode(parent));
919 dput(parent);
920 }
921 }
922
nfs_readdirplus_parent_cache_hit(struct dentry * dentry)923 static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
924 {
925 if (!IS_ROOT(dentry)) {
926 struct dentry *parent = dget_parent(dentry);
927 nfs_readdir_record_entry_cache_hit(d_inode(parent));
928 dput(parent);
929 }
930 }
931
nfs_get_valid_attrmask(struct inode * inode)932 static u32 nfs_get_valid_attrmask(struct inode *inode)
933 {
934 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
935 u32 reply_mask = STATX_INO | STATX_TYPE;
936
937 if (!(cache_validity & NFS_INO_INVALID_ATIME))
938 reply_mask |= STATX_ATIME;
939 if (!(cache_validity & NFS_INO_INVALID_CTIME))
940 reply_mask |= STATX_CTIME;
941 if (!(cache_validity & NFS_INO_INVALID_MTIME))
942 reply_mask |= STATX_MTIME;
943 if (!(cache_validity & NFS_INO_INVALID_SIZE))
944 reply_mask |= STATX_SIZE;
945 if (!(cache_validity & NFS_INO_INVALID_NLINK))
946 reply_mask |= STATX_NLINK;
947 if (!(cache_validity & NFS_INO_INVALID_MODE))
948 reply_mask |= STATX_MODE;
949 if (!(cache_validity & NFS_INO_INVALID_OTHER))
950 reply_mask |= STATX_UID | STATX_GID;
951 if (!(cache_validity & NFS_INO_INVALID_BLOCKS))
952 reply_mask |= STATX_BLOCKS;
953 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
954 reply_mask |= STATX_CHANGE_COOKIE;
955 return reply_mask;
956 }
957
nfs_getattr(struct mnt_idmap * idmap,const struct path * path,struct kstat * stat,u32 request_mask,unsigned int query_flags)958 int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
959 struct kstat *stat, u32 request_mask, unsigned int query_flags)
960 {
961 struct inode *inode = d_inode(path->dentry);
962 struct nfs_server *server = NFS_SERVER(inode);
963 unsigned long cache_validity;
964 int err = 0;
965 bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
966 bool do_update = false;
967 bool readdirplus_enabled = nfs_getattr_readdirplus_enable(inode);
968
969 trace_nfs_getattr_enter(inode);
970
971 request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
972 STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
973 STATX_INO | STATX_SIZE | STATX_BLOCKS |
974 STATX_CHANGE_COOKIE;
975
976 if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
977 if (readdirplus_enabled)
978 nfs_readdirplus_parent_cache_hit(path->dentry);
979 goto out_no_revalidate;
980 }
981
982 /* Flush out writes to the server in order to update c/mtime/version. */
983 if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_CHANGE_COOKIE)) &&
984 S_ISREG(inode->i_mode)) {
985 if (nfs_have_delegated_mtime(inode))
986 filemap_fdatawrite(inode->i_mapping);
987 else
988 filemap_write_and_wait(inode->i_mapping);
989 }
990
991 /*
992 * We may force a getattr if the user cares about atime.
993 *
994 * Note that we only have to check the vfsmount flags here:
995 * - NFS always sets S_NOATIME by so checking it would give a
996 * bogus result
997 * - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
998 * no point in checking those.
999 */
1000 if ((path->mnt->mnt_flags & MNT_NOATIME) ||
1001 ((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
1002 request_mask &= ~STATX_ATIME;
1003
1004 /* Is the user requesting attributes that might need revalidation? */
1005 if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
1006 STATX_MTIME|STATX_UID|STATX_GID|
1007 STATX_SIZE|STATX_BLOCKS|
1008 STATX_CHANGE_COOKIE)))
1009 goto out_no_revalidate;
1010
1011 /* Check whether the cached attributes are stale */
1012 do_update |= force_sync || nfs_attribute_cache_expired(inode);
1013 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
1014 do_update |= cache_validity & NFS_INO_INVALID_CHANGE;
1015 if (request_mask & STATX_ATIME)
1016 do_update |= cache_validity & NFS_INO_INVALID_ATIME;
1017 if (request_mask & STATX_CTIME)
1018 do_update |= cache_validity & NFS_INO_INVALID_CTIME;
1019 if (request_mask & STATX_MTIME)
1020 do_update |= cache_validity & NFS_INO_INVALID_MTIME;
1021 if (request_mask & STATX_SIZE)
1022 do_update |= cache_validity & NFS_INO_INVALID_SIZE;
1023 if (request_mask & STATX_NLINK)
1024 do_update |= cache_validity & NFS_INO_INVALID_NLINK;
1025 if (request_mask & STATX_MODE)
1026 do_update |= cache_validity & NFS_INO_INVALID_MODE;
1027 if (request_mask & (STATX_UID | STATX_GID))
1028 do_update |= cache_validity & NFS_INO_INVALID_OTHER;
1029 if (request_mask & STATX_BLOCKS)
1030 do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
1031
1032 if (do_update) {
1033 if (readdirplus_enabled)
1034 nfs_readdirplus_parent_cache_miss(path->dentry);
1035 err = __nfs_revalidate_inode(server, inode);
1036 if (err)
1037 goto out;
1038 } else if (readdirplus_enabled)
1039 nfs_readdirplus_parent_cache_hit(path->dentry);
1040 out_no_revalidate:
1041 /* Only return attributes that were revalidated. */
1042 stat->result_mask = nfs_get_valid_attrmask(inode) | request_mask;
1043
1044 generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1045 stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
1046 stat->change_cookie = inode_peek_iversion_raw(inode);
1047 stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
1048 if (server->change_attr_type != NFS4_CHANGE_TYPE_IS_UNDEFINED)
1049 stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
1050 if (S_ISDIR(inode->i_mode))
1051 stat->blksize = NFS_SERVER(inode)->dtsize;
1052 out:
1053 trace_nfs_getattr_exit(inode, err);
1054 return err;
1055 }
1056 EXPORT_SYMBOL_GPL(nfs_getattr);
1057
nfs_init_lock_context(struct nfs_lock_context * l_ctx)1058 static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
1059 {
1060 refcount_set(&l_ctx->count, 1);
1061 l_ctx->lockowner = current->files;
1062 INIT_LIST_HEAD(&l_ctx->list);
1063 atomic_set(&l_ctx->io_count, 0);
1064 }
1065
__nfs_find_lock_context(struct nfs_open_context * ctx)1066 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
1067 {
1068 struct nfs_lock_context *pos;
1069
1070 list_for_each_entry_rcu(pos, &ctx->lock_context.list, list) {
1071 if (pos->lockowner != current->files)
1072 continue;
1073 if (refcount_inc_not_zero(&pos->count))
1074 return pos;
1075 }
1076 return NULL;
1077 }
1078
nfs_get_lock_context(struct nfs_open_context * ctx)1079 struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
1080 {
1081 struct nfs_lock_context *res, *new = NULL;
1082 struct inode *inode = d_inode(ctx->dentry);
1083
1084 rcu_read_lock();
1085 res = __nfs_find_lock_context(ctx);
1086 rcu_read_unlock();
1087 if (res == NULL) {
1088 new = kmalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
1089 if (new == NULL)
1090 return ERR_PTR(-ENOMEM);
1091 nfs_init_lock_context(new);
1092 spin_lock(&inode->i_lock);
1093 res = __nfs_find_lock_context(ctx);
1094 if (res == NULL) {
1095 new->open_context = get_nfs_open_context(ctx);
1096 if (new->open_context) {
1097 list_add_tail_rcu(&new->list,
1098 &ctx->lock_context.list);
1099 res = new;
1100 new = NULL;
1101 } else
1102 res = ERR_PTR(-EBADF);
1103 }
1104 spin_unlock(&inode->i_lock);
1105 kfree(new);
1106 }
1107 return res;
1108 }
1109 EXPORT_SYMBOL_GPL(nfs_get_lock_context);
1110
nfs_put_lock_context(struct nfs_lock_context * l_ctx)1111 void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
1112 {
1113 struct nfs_open_context *ctx = l_ctx->open_context;
1114 struct inode *inode = d_inode(ctx->dentry);
1115
1116 if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
1117 return;
1118 list_del_rcu(&l_ctx->list);
1119 spin_unlock(&inode->i_lock);
1120 put_nfs_open_context(ctx);
1121 kfree_rcu(l_ctx, rcu_head);
1122 }
1123 EXPORT_SYMBOL_GPL(nfs_put_lock_context);
1124
1125 /**
1126 * nfs_close_context - Common close_context() routine NFSv2/v3
1127 * @ctx: pointer to context
1128 * @is_sync: is this a synchronous close
1129 *
1130 * Ensure that the attributes are up to date if we're mounted
1131 * with close-to-open semantics and we have cached data that will
1132 * need to be revalidated on open.
1133 */
nfs_close_context(struct nfs_open_context * ctx,int is_sync)1134 void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
1135 {
1136 struct nfs_inode *nfsi;
1137 struct inode *inode;
1138
1139 if (!(ctx->mode & FMODE_WRITE))
1140 return;
1141 if (!is_sync)
1142 return;
1143 inode = d_inode(ctx->dentry);
1144 if (nfs_have_read_or_write_delegation(inode))
1145 return;
1146 nfsi = NFS_I(inode);
1147 if (inode->i_mapping->nrpages == 0)
1148 return;
1149 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1150 return;
1151 if (!list_empty(&nfsi->open_files))
1152 return;
1153 if (NFS_SERVER(inode)->flags & NFS_MOUNT_NOCTO)
1154 return;
1155 nfs_revalidate_inode(inode,
1156 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
1157 }
1158 EXPORT_SYMBOL_GPL(nfs_close_context);
1159
alloc_nfs_open_context(struct dentry * dentry,fmode_t f_mode,struct file * filp)1160 struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
1161 fmode_t f_mode,
1162 struct file *filp)
1163 {
1164 struct nfs_open_context *ctx;
1165
1166 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
1167 if (!ctx)
1168 return ERR_PTR(-ENOMEM);
1169 nfs_sb_active(dentry->d_sb);
1170 ctx->dentry = dget(dentry);
1171 if (filp)
1172 ctx->cred = get_cred(filp->f_cred);
1173 else
1174 ctx->cred = get_current_cred();
1175 rcu_assign_pointer(ctx->ll_cred, NULL);
1176 ctx->state = NULL;
1177 ctx->mode = f_mode;
1178 ctx->flags = 0;
1179 ctx->error = 0;
1180 ctx->flock_owner = (fl_owner_t)filp;
1181 nfs_init_lock_context(&ctx->lock_context);
1182 ctx->lock_context.open_context = ctx;
1183 INIT_LIST_HEAD(&ctx->list);
1184 ctx->mdsthreshold = NULL;
1185 nfs_localio_file_init(&ctx->nfl);
1186
1187 return ctx;
1188 }
1189 EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
1190
get_nfs_open_context(struct nfs_open_context * ctx)1191 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
1192 {
1193 if (ctx != NULL && refcount_inc_not_zero(&ctx->lock_context.count))
1194 return ctx;
1195 return NULL;
1196 }
1197 EXPORT_SYMBOL_GPL(get_nfs_open_context);
1198
__put_nfs_open_context(struct nfs_open_context * ctx,int is_sync)1199 static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
1200 {
1201 struct inode *inode = d_inode(ctx->dentry);
1202 struct super_block *sb = ctx->dentry->d_sb;
1203
1204 if (!refcount_dec_and_test(&ctx->lock_context.count))
1205 return;
1206 if (!list_empty(&ctx->list)) {
1207 spin_lock(&inode->i_lock);
1208 list_del_rcu(&ctx->list);
1209 spin_unlock(&inode->i_lock);
1210 }
1211 if (inode != NULL)
1212 NFS_PROTO(inode)->close_context(ctx, is_sync);
1213 put_cred(ctx->cred);
1214 dput(ctx->dentry);
1215 nfs_sb_deactive(sb);
1216 put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1));
1217 kfree(ctx->mdsthreshold);
1218 nfs_close_local_fh(&ctx->nfl);
1219 kfree_rcu(ctx, rcu_head);
1220 }
1221
put_nfs_open_context(struct nfs_open_context * ctx)1222 void put_nfs_open_context(struct nfs_open_context *ctx)
1223 {
1224 __put_nfs_open_context(ctx, 0);
1225 }
1226 EXPORT_SYMBOL_GPL(put_nfs_open_context);
1227
put_nfs_open_context_sync(struct nfs_open_context * ctx)1228 static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
1229 {
1230 __put_nfs_open_context(ctx, 1);
1231 }
1232
1233 /*
1234 * Ensure that mmap has a recent RPC credential for use when writing out
1235 * shared pages
1236 */
nfs_inode_attach_open_context(struct nfs_open_context * ctx)1237 void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
1238 {
1239 struct inode *inode = d_inode(ctx->dentry);
1240 struct nfs_inode *nfsi = NFS_I(inode);
1241
1242 spin_lock(&inode->i_lock);
1243 if (list_empty(&nfsi->open_files) &&
1244 nfs_ooo_test(nfsi))
1245 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
1246 NFS_INO_REVAL_FORCED);
1247 list_add_tail_rcu(&ctx->list, &nfsi->open_files);
1248 spin_unlock(&inode->i_lock);
1249 }
1250 EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
1251
nfs_file_set_open_context(struct file * filp,struct nfs_open_context * ctx)1252 void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
1253 {
1254 filp->private_data = get_nfs_open_context(ctx);
1255 set_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
1256 if (list_empty(&ctx->list))
1257 nfs_inode_attach_open_context(ctx);
1258 }
1259 EXPORT_SYMBOL_GPL(nfs_file_set_open_context);
1260
1261 /*
1262 * Given an inode, search for an open context with the desired characteristics
1263 */
nfs_find_open_context(struct inode * inode,const struct cred * cred,fmode_t mode)1264 struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode)
1265 {
1266 struct nfs_inode *nfsi = NFS_I(inode);
1267 struct nfs_open_context *pos, *ctx = NULL;
1268
1269 rcu_read_lock();
1270 list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
1271 if (cred != NULL && cred_fscmp(pos->cred, cred) != 0)
1272 continue;
1273 if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
1274 continue;
1275 if (!test_bit(NFS_CONTEXT_FILE_OPEN, &pos->flags))
1276 continue;
1277 ctx = get_nfs_open_context(pos);
1278 if (ctx)
1279 break;
1280 }
1281 rcu_read_unlock();
1282 return ctx;
1283 }
1284
nfs_file_clear_open_context(struct file * filp)1285 void nfs_file_clear_open_context(struct file *filp)
1286 {
1287 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1288
1289 if (ctx) {
1290 struct inode *inode = d_inode(ctx->dentry);
1291
1292 clear_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
1293 /*
1294 * We fatal error on write before. Try to writeback
1295 * every page again.
1296 */
1297 if (ctx->error < 0)
1298 invalidate_inode_pages2(inode->i_mapping);
1299 filp->private_data = NULL;
1300 put_nfs_open_context_sync(ctx);
1301 }
1302 }
1303
1304 /*
1305 * These allocate and release file read/write context information.
1306 */
nfs_open(struct inode * inode,struct file * filp)1307 int nfs_open(struct inode *inode, struct file *filp)
1308 {
1309 struct nfs_open_context *ctx;
1310
1311 ctx = alloc_nfs_open_context(file_dentry(filp),
1312 flags_to_mode(filp->f_flags), filp);
1313 if (IS_ERR(ctx))
1314 return PTR_ERR(ctx);
1315 nfs_file_set_open_context(filp, ctx);
1316 put_nfs_open_context(ctx);
1317 nfs_fscache_open_file(inode, filp);
1318 return 0;
1319 }
1320
1321 /*
1322 * This function is called whenever some part of NFS notices that
1323 * the cached attributes have to be refreshed.
1324 */
1325 int
__nfs_revalidate_inode(struct nfs_server * server,struct inode * inode)1326 __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
1327 {
1328 int status = -ESTALE;
1329 struct nfs_fattr *fattr = NULL;
1330 struct nfs_inode *nfsi = NFS_I(inode);
1331
1332 dfprintk(PAGECACHE, "NFS: revalidating (%s/%Lu)\n",
1333 inode->i_sb->s_id, (unsigned long long)NFS_FILEID(inode));
1334
1335 trace_nfs_revalidate_inode_enter(inode);
1336
1337 if (is_bad_inode(inode))
1338 goto out;
1339 if (NFS_STALE(inode))
1340 goto out;
1341
1342 /* pNFS: Attributes aren't updated until we layoutcommit */
1343 if (S_ISREG(inode->i_mode)) {
1344 status = pnfs_sync_inode(inode, false);
1345 if (status)
1346 goto out;
1347 }
1348
1349 status = -ENOMEM;
1350 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
1351 if (fattr == NULL)
1352 goto out;
1353
1354 nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
1355
1356 status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr, inode);
1357 if (status != 0) {
1358 dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n",
1359 inode->i_sb->s_id,
1360 (unsigned long long)NFS_FILEID(inode), status);
1361 switch (status) {
1362 case -ETIMEDOUT:
1363 /* A soft timeout occurred. Use cached information? */
1364 if (server->flags & NFS_MOUNT_SOFTREVAL)
1365 status = 0;
1366 break;
1367 case -ESTALE:
1368 if (!S_ISDIR(inode->i_mode))
1369 nfs_set_inode_stale(inode);
1370 else
1371 nfs_zap_caches(inode);
1372 }
1373 goto out;
1374 }
1375
1376 status = nfs_refresh_inode(inode, fattr);
1377 if (status) {
1378 dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) refresh failed, error=%d\n",
1379 inode->i_sb->s_id,
1380 (unsigned long long)NFS_FILEID(inode), status);
1381 goto out;
1382 }
1383
1384 if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
1385 nfs_zap_acl_cache(inode);
1386
1387 nfs_setsecurity(inode, fattr);
1388
1389 dfprintk(PAGECACHE, "NFS: (%s/%Lu) revalidation complete\n",
1390 inode->i_sb->s_id,
1391 (unsigned long long)NFS_FILEID(inode));
1392
1393 out:
1394 nfs_free_fattr(fattr);
1395 trace_nfs_revalidate_inode_exit(inode, status);
1396 return status;
1397 }
1398
nfs_attribute_cache_expired(struct inode * inode)1399 int nfs_attribute_cache_expired(struct inode *inode)
1400 {
1401 if (nfs_have_delegated_attributes(inode))
1402 return 0;
1403 return nfs_attribute_timeout(inode);
1404 }
1405
1406 /**
1407 * nfs_revalidate_inode - Revalidate the inode attributes
1408 * @inode: pointer to inode struct
1409 * @flags: cache flags to check
1410 *
1411 * Updates inode attribute information by retrieving the data from the server.
1412 */
nfs_revalidate_inode(struct inode * inode,unsigned long flags)1413 int nfs_revalidate_inode(struct inode *inode, unsigned long flags)
1414 {
1415 if (!nfs_check_cache_invalid(inode, flags))
1416 return NFS_STALE(inode) ? -ESTALE : 0;
1417 return __nfs_revalidate_inode(NFS_SERVER(inode), inode);
1418 }
1419 EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
1420
nfs_invalidate_mapping(struct inode * inode,struct address_space * mapping)1421 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
1422 {
1423 int ret;
1424
1425 nfs_fscache_invalidate(inode, 0);
1426 if (mapping->nrpages != 0) {
1427 if (S_ISREG(inode->i_mode)) {
1428 ret = nfs_sync_mapping(mapping);
1429 if (ret < 0)
1430 return ret;
1431 }
1432 ret = invalidate_inode_pages2(mapping);
1433 if (ret < 0)
1434 return ret;
1435 }
1436 nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
1437
1438 dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n",
1439 inode->i_sb->s_id,
1440 (unsigned long long)NFS_FILEID(inode));
1441 return 0;
1442 }
1443
1444 /**
1445 * nfs_clear_invalid_mapping - Conditionally clear a mapping
1446 * @mapping: pointer to mapping
1447 *
1448 * If the NFS_INO_INVALID_DATA inode flag is set, clear the mapping.
1449 */
nfs_clear_invalid_mapping(struct address_space * mapping)1450 int nfs_clear_invalid_mapping(struct address_space *mapping)
1451 {
1452 struct inode *inode = mapping->host;
1453 struct nfs_inode *nfsi = NFS_I(inode);
1454 unsigned long *bitlock = &nfsi->flags;
1455 int ret = 0;
1456
1457 /*
1458 * We must clear NFS_INO_INVALID_DATA first to ensure that
1459 * invalidations that come in while we're shooting down the mappings
1460 * are respected. But, that leaves a race window where one revalidator
1461 * can clear the flag, and then another checks it before the mapping
1462 * gets invalidated. Fix that by serializing access to this part of
1463 * the function.
1464 *
1465 * At the same time, we need to allow other tasks to see whether we
1466 * might be in the middle of invalidating the pages, so we only set
1467 * the bit lock here if it looks like we're going to be doing that.
1468 */
1469 for (;;) {
1470 ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
1471 nfs_wait_bit_killable,
1472 TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
1473 if (ret)
1474 goto out;
1475 smp_rmb(); /* pairs with smp_wmb() below */
1476 if (test_bit(NFS_INO_INVALIDATING, bitlock))
1477 continue;
1478 /* pairs with nfs_set_cache_invalid()'s smp_store_release() */
1479 if (!(smp_load_acquire(&nfsi->cache_validity) & NFS_INO_INVALID_DATA))
1480 goto out;
1481 /* Slow-path that double-checks with spinlock held */
1482 spin_lock(&inode->i_lock);
1483 if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
1484 spin_unlock(&inode->i_lock);
1485 continue;
1486 }
1487 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1488 break;
1489 spin_unlock(&inode->i_lock);
1490 goto out;
1491 }
1492
1493 set_bit(NFS_INO_INVALIDATING, bitlock);
1494 smp_wmb();
1495 nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
1496 nfs_ooo_clear(nfsi);
1497 spin_unlock(&inode->i_lock);
1498 trace_nfs_invalidate_mapping_enter(inode);
1499 ret = nfs_invalidate_mapping(inode, mapping);
1500 trace_nfs_invalidate_mapping_exit(inode, ret);
1501
1502 clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
1503 smp_mb__after_atomic();
1504 wake_up_bit(bitlock, NFS_INO_INVALIDATING);
1505 out:
1506 return ret;
1507 }
1508
nfs_mapping_need_revalidate_inode(struct inode * inode)1509 bool nfs_mapping_need_revalidate_inode(struct inode *inode)
1510 {
1511 return nfs_check_cache_invalid(inode, NFS_INO_INVALID_CHANGE) ||
1512 NFS_STALE(inode);
1513 }
1514
nfs_revalidate_mapping_rcu(struct inode * inode)1515 int nfs_revalidate_mapping_rcu(struct inode *inode)
1516 {
1517 struct nfs_inode *nfsi = NFS_I(inode);
1518 unsigned long *bitlock = &nfsi->flags;
1519 int ret = 0;
1520
1521 if (IS_SWAPFILE(inode))
1522 goto out;
1523 if (nfs_mapping_need_revalidate_inode(inode)) {
1524 ret = -ECHILD;
1525 goto out;
1526 }
1527 spin_lock(&inode->i_lock);
1528 if (test_bit(NFS_INO_INVALIDATING, bitlock) ||
1529 (nfsi->cache_validity & NFS_INO_INVALID_DATA))
1530 ret = -ECHILD;
1531 spin_unlock(&inode->i_lock);
1532 out:
1533 return ret;
1534 }
1535
1536 /**
1537 * nfs_revalidate_mapping - Revalidate the pagecache
1538 * @inode: pointer to host inode
1539 * @mapping: pointer to mapping
1540 */
nfs_revalidate_mapping(struct inode * inode,struct address_space * mapping)1541 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
1542 {
1543 /* swapfiles are not supposed to be shared. */
1544 if (IS_SWAPFILE(inode))
1545 return 0;
1546
1547 if (nfs_mapping_need_revalidate_inode(inode)) {
1548 int ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
1549 if (ret < 0)
1550 return ret;
1551 }
1552
1553 return nfs_clear_invalid_mapping(mapping);
1554 }
1555
nfs_file_has_writers(struct nfs_inode * nfsi)1556 static bool nfs_file_has_writers(struct nfs_inode *nfsi)
1557 {
1558 struct inode *inode = &nfsi->vfs_inode;
1559
1560 if (!S_ISREG(inode->i_mode))
1561 return false;
1562 if (list_empty(&nfsi->open_files))
1563 return false;
1564 return inode_is_open_for_write(inode);
1565 }
1566
nfs_file_has_buffered_writers(struct nfs_inode * nfsi)1567 static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
1568 {
1569 return nfs_file_has_writers(nfsi) && nfs_file_io_is_buffered(nfsi);
1570 }
1571
nfs_wcc_update_inode(struct inode * inode,struct nfs_fattr * fattr)1572 static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1573 {
1574 struct timespec64 ts;
1575
1576 if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
1577 && (fattr->valid & NFS_ATTR_FATTR_CHANGE)
1578 && inode_eq_iversion_raw(inode, fattr->pre_change_attr)) {
1579 inode_set_iversion_raw(inode, fattr->change_attr);
1580 if (S_ISDIR(inode->i_mode))
1581 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1582 else if (nfs_server_capable(inode, NFS_CAP_XATTR))
1583 nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
1584 }
1585 /* If we have atomic WCC data, we may update some attributes */
	ts = inode_get_ctime(inode);
	if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
			&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
			&& timespec64_equal(&ts, &fattr->pre_ctime)) {
		inode_set_ctime_to_ts(inode, fattr->ctime);
	}

	ts = inode_get_mtime(inode);
	if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
			&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
			&& timespec64_equal(&ts, &fattr->pre_mtime)) {
		inode_set_mtime_to_ts(inode, fattr->mtime);
	}
	if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
			&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
			&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
			&& !nfs_have_writebacks(inode)) {
		trace_nfs_size_wcc(inode, fattr->size);
		i_size_write(inode, nfs_size_to_loff_t(fattr->size));
	}
}

/**
 * nfs_check_inode_attributes - verify consistency of the inode attribute cache
 * @inode: pointer to inode
 * @fattr: updated attributes
 *
 * Verifies the attribute cache. If we have just changed the attributes,
 * so that fattr carries weak cache consistency data, then it may
 * also update the ctime/mtime/change_attribute.
 */
static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t cur_size, new_isize;
	unsigned long invalid = 0;
	struct timespec64 ts;

	if (nfs_have_delegated_attributes(inode))
		return 0;

	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
		/* Only a mounted-on-fileid? Just exit */
		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
			return 0;
	/* Has the inode gone and changed behind our back? */
	} else if (nfsi->fileid != fattr->fileid) {
		/* Is this perhaps the mounted-on fileid? */
		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
		    nfsi->fileid == fattr->mounted_on_fileid)
			return 0;
		return -ESTALE;
	}
	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && inode_wrong_type(inode, fattr->mode))
		return -ESTALE;


	if (!nfs_file_has_buffered_writers(nfsi)) {
		/* Verify a few of the more important attributes */
		if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
			invalid |= NFS_INO_INVALID_CHANGE;

		ts = inode_get_mtime(inode);
		if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
			invalid |= NFS_INO_INVALID_MTIME;

		ts = inode_get_ctime(inode);
		if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec64_equal(&ts, &fattr->ctime))
			invalid |= NFS_INO_INVALID_CTIME;

		if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
			cur_size = i_size_read(inode);
			new_isize = nfs_size_to_loff_t(fattr->size);
			if (cur_size != new_isize)
				invalid |= NFS_INO_INVALID_SIZE;
		}
	}

	/* Have any file permissions changed? */
	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
		invalid |= NFS_INO_INVALID_MODE;
	if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
		invalid |= NFS_INO_INVALID_OTHER;
	if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
		invalid |= NFS_INO_INVALID_OTHER;

	/* Has the link count changed? */
	if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
		invalid |= NFS_INO_INVALID_NLINK;

	ts = inode_get_atime(inode);
	if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime))
		invalid |= NFS_INO_INVALID_ATIME;

	if (invalid != 0)
		nfs_set_cache_invalid(inode, invalid);

	nfsi->read_cache_jiffies = fattr->time_start;
	return 0;
}

static atomic_long_t nfs_attr_generation_counter;

static unsigned long nfs_read_attr_generation_counter(void)
{
	return atomic_long_read(&nfs_attr_generation_counter);
}

unsigned long nfs_inc_attr_generation_counter(void)
{
	return atomic_long_inc_return(&nfs_attr_generation_counter);
}
EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);

void nfs_fattr_init(struct nfs_fattr *fattr)
{
	fattr->valid = 0;
	fattr->time_start = jiffies;
	fattr->gencount = nfs_inc_attr_generation_counter();
	fattr->owner_name = NULL;
	fattr->group_name = NULL;
	fattr->mdsthreshold = NULL;
}
EXPORT_SYMBOL_GPL(nfs_fattr_init);

/**
 * nfs_fattr_set_barrier
 * @fattr: attributes
 *
 * Used to set a barrier after an attribute was updated. This
 * barrier ensures that older attributes from RPC calls that may
 * have raced with our update cannot clobber these new values.
 * Note that you are still responsible for ensuring that other
 * operations which change the attribute on the server do not
 * collide.
 */
void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
{
	fattr->gencount = nfs_inc_attr_generation_counter();
}
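
/*
 * Illustrative caller sketch (not an additional code path): a caller that
 * has just changed an attribute typically bumps the barrier on the fattr
 * it received before merging it, under the inode spinlock, e.g.:
 *
 *	spin_lock(&inode->i_lock);
 *	nfs_fattr_set_barrier(fattr);
 *	status = nfs_refresh_inode_locked(inode, fattr);
 *	spin_unlock(&inode->i_lock);
 *
 * This mirrors what nfs_post_op_update_inode() does further down; any
 * other caller implied here is hypothetical.
 */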

struct nfs_fattr *nfs_alloc_fattr(void)
{
	struct nfs_fattr *fattr;

	fattr = kmalloc(sizeof(*fattr), GFP_KERNEL);
	if (fattr != NULL) {
		nfs_fattr_init(fattr);
		fattr->label = NULL;
	}
	return fattr;
}
EXPORT_SYMBOL_GPL(nfs_alloc_fattr);

struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server)
{
	struct nfs_fattr *fattr = nfs_alloc_fattr();

	if (!fattr)
		return NULL;

	fattr->label = nfs4_label_alloc(server, GFP_KERNEL);
	if (IS_ERR(fattr->label)) {
		kfree(fattr);
		return NULL;
	}

	return fattr;
}
EXPORT_SYMBOL_GPL(nfs_alloc_fattr_with_label);

struct nfs_fh *nfs_alloc_fhandle(void)
{
	struct nfs_fh *fh;

	fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
	if (fh != NULL)
		fh->size = 0;
	return fh;
}
EXPORT_SYMBOL_GPL(nfs_alloc_fhandle);
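
/*
 * Minimal usage sketch (assumptions noted): a proc-layer caller normally
 * pairs these allocators with kfree()-based release helpers, roughly:
 *
 *	struct nfs_fattr *fattr = nfs_alloc_fattr();
 *	struct nfs_fh *fh = nfs_alloc_fhandle();
 *
 *	if (fattr == NULL || fh == NULL)
 *		goto out;	// "out" frees whatever was allocated
 *	// ... fill fh/fattr from an RPC reply ...
 *
 * The release step is assumed to be the nfs_free_fattr()/nfs_free_fhandle()
 * helpers from linux/nfs_fs.h (thin kfree() wrappers); the exact error
 * handling belongs to the caller and is only sketched here.
 */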

#ifdef NFS_DEBUG
/*
 * _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle
 *                             in the same way that wireshark does
 *
 * @fh: file handle
 *
 * For debugging only.
 */
u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh)
{
	/* wireshark uses 32-bit AUTODIN crc and does a bitwise
	 * not on the result */
	return nfs_fhandle_hash(fh);
}
EXPORT_SYMBOL_GPL(_nfs_display_fhandle_hash);

/*
 * _nfs_display_fhandle - display an NFS file handle on the console
 *
 * @fh: file handle to display
 * @caption: display caption
 *
 * For debugging only.
 */
void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption)
{
	unsigned short i;

	if (fh == NULL || fh->size == 0) {
		printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh);
		return;
	}

	printk(KERN_DEFAULT "%s at %p is %u bytes, crc: 0x%08x:\n",
		caption, fh, fh->size, _nfs_display_fhandle_hash(fh));
	for (i = 0; i < fh->size; i += 16) {
		__be32 *pos = (__be32 *)&fh->data[i];

		switch ((fh->size - i - 1) >> 2) {
		case 0:
			printk(KERN_DEFAULT " %08x\n",
				be32_to_cpup(pos));
			break;
		case 1:
			printk(KERN_DEFAULT " %08x %08x\n",
				be32_to_cpup(pos), be32_to_cpup(pos + 1));
			break;
		case 2:
			printk(KERN_DEFAULT " %08x %08x %08x\n",
				be32_to_cpup(pos), be32_to_cpup(pos + 1),
				be32_to_cpup(pos + 2));
			break;
		default:
			printk(KERN_DEFAULT " %08x %08x %08x %08x\n",
				be32_to_cpup(pos), be32_to_cpup(pos + 1),
				be32_to_cpup(pos + 2), be32_to_cpup(pos + 3));
		}
	}
}
EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
#endif
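
/*
 * For illustration only (hypothetical values): a 16-byte file handle dumped
 * via _nfs_display_fhandle(fh, "FH") would show up in the log roughly as
 *
 *	FH at 000000001a2b3c4d is 16 bytes, crc: 0x5e6f7a8b:
 *	 01000700 2a005aba 0000ed13 000023cf
 *
 * i.e. one caption line followed by rows of up to four big-endian words.
 */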

/**
 * nfs_inode_attrs_cmp_generic - compare attributes
 * @fattr: attributes
 * @inode: pointer to inode
 *
 * Attempt to divine whether or not an RPC call reply carrying stale
 * attributes got scheduled after another call carrying updated ones.
 * Note also the check for wraparound of 'attr_gencount'
 *
 * The function returns '1' if it thinks the attributes in @fattr are
 * more recent than the ones cached in @inode. Otherwise it returns
 * the value '0'.
 */
static int nfs_inode_attrs_cmp_generic(const struct nfs_fattr *fattr,
				       const struct inode *inode)
{
	unsigned long attr_gencount = NFS_I(inode)->attr_gencount;

	return (long)(fattr->gencount - attr_gencount) > 0 ||
	       (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
}
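
/*
 * Worked example of the comparison above: the subtraction is performed on
 * unsigned long and the result reinterpreted as signed, so ordering
 * survives counter wraparound.  With 64-bit longs, if attr_gencount is
 * ULONG_MAX and fattr->gencount is 1 (the counter wrapped), then
 * fattr->gencount - attr_gencount == 2, which is > 0 when cast to long,
 * so @fattr is still correctly treated as the newer sample.
 */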

/**
 * nfs_inode_attrs_cmp_monotonic - compare attributes
 * @fattr: attributes
 * @inode: pointer to inode
 *
 * Attempt to divine whether or not an RPC call reply carrying stale
 * attributes got scheduled after another call carrying updated ones.
 *
 * We assume that the server observes monotonic semantics for
 * the change attribute, so a larger value means that the attributes in
 * @fattr are more recent, in which case the function returns the
 * value '1'.
 * A return value of '0' indicates no measurable change
 * A return value of '-1' means that the attributes in @inode are
 * more recent.
 */
static int nfs_inode_attrs_cmp_monotonic(const struct nfs_fattr *fattr,
					 const struct inode *inode)
{
	s64 diff = fattr->change_attr - inode_peek_iversion_raw(inode);
	if (diff > 0)
		return 1;
	return diff == 0 ? 0 : -1;
}

/**
 * nfs_inode_attrs_cmp_strict_monotonic - compare attributes
 * @fattr: attributes
 * @inode: pointer to inode
 *
 * Attempt to divine whether or not an RPC call reply carrying stale
 * attributes got scheduled after another call carrying updated ones.
 *
 * We assume that the server observes strictly monotonic semantics for
 * the change attribute, so a larger value means that the attributes in
 * @fattr are more recent, in which case the function returns the
 * value '1'.
 * A return value of '-1' means that the attributes in @inode are
 * more recent or unchanged.
 */
static int nfs_inode_attrs_cmp_strict_monotonic(const struct nfs_fattr *fattr,
						const struct inode *inode)
{
	return nfs_inode_attrs_cmp_monotonic(fattr, inode) > 0 ? 1 : -1;
}

/**
 * nfs_inode_attrs_cmp - compare attributes
 * @fattr: attributes
 * @inode: pointer to inode
 *
 * This function returns '1' if it thinks the attributes in @fattr are
 * more recent than the ones cached in @inode. It returns '-1' if
 * the attributes in @inode are more recent than the ones in @fattr,
 * and it returns 0 if not sure.
 */
static int nfs_inode_attrs_cmp(const struct nfs_fattr *fattr,
			       const struct inode *inode)
{
	if (nfs_inode_attrs_cmp_generic(fattr, inode) > 0)
		return 1;
	switch (NFS_SERVER(inode)->change_attr_type) {
	case NFS4_CHANGE_TYPE_IS_UNDEFINED:
		break;
	case NFS4_CHANGE_TYPE_IS_TIME_METADATA:
		if (!(fattr->valid & NFS_ATTR_FATTR_CHANGE))
			break;
		return nfs_inode_attrs_cmp_monotonic(fattr, inode);
	default:
		if (!(fattr->valid & NFS_ATTR_FATTR_CHANGE))
			break;
		return nfs_inode_attrs_cmp_strict_monotonic(fattr, inode);
	}
	return 0;
}
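
/*
 * Summary of the decision above: a newer generation count always wins (1).
 * Otherwise, when the change attribute is usable, the monotonic comparison
 * decides: NFS4_CHANGE_TYPE_IS_TIME_METADATA tolerates equal values
 * (0 = "not sure"), while the strictly monotonic types treat anything that
 * is not strictly newer as stale (-1).  With an undefined change_attr_type,
 * or no change attribute in @fattr, the result is 0.
 */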

/**
 * nfs_inode_finish_partial_attr_update - complete a previous inode update
 * @fattr: attributes
 * @inode: pointer to inode
 *
 * Returns '1' if the last attribute update left the inode cached
 * attributes in a partially unrevalidated state, and @fattr
 * matches the change attribute of that partial update.
 * Otherwise returns '0'.
 */
static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
						const struct inode *inode)
{
	const unsigned long check_valid =
		NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
		NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
		NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_OTHER |
		NFS_INO_INVALID_NLINK;
	unsigned long cache_validity = NFS_I(inode)->cache_validity;
	enum nfs4_change_attr_type ctype = NFS_SERVER(inode)->change_attr_type;

	if (ctype != NFS4_CHANGE_TYPE_IS_UNDEFINED &&
	    !(cache_validity & NFS_INO_INVALID_CHANGE) &&
	    (cache_validity & check_valid) != 0 &&
	    (fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
	    nfs_inode_attrs_cmp_monotonic(fattr, inode) == 0)
		return 1;
	return 0;
}

static void nfs_ooo_merge(struct nfs_inode *nfsi,
			  u64 start, u64 end)
{
	int i, cnt;

	if (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)
		/* No point merging anything */
		return;

	if (!nfsi->ooo) {
		nfsi->ooo = kmalloc(sizeof(*nfsi->ooo), GFP_ATOMIC);
		if (!nfsi->ooo) {
			nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
			return;
		}
		nfsi->ooo->cnt = 0;
	}

	/* add this range, merging if possible */
	cnt = nfsi->ooo->cnt;
	for (i = 0; i < cnt; i++) {
		if (end == nfsi->ooo->gap[i].start)
			end = nfsi->ooo->gap[i].end;
		else if (start == nfsi->ooo->gap[i].end)
			start = nfsi->ooo->gap[i].start;
		else
			continue;
		/* Remove 'i' from table and loop to insert the new range */
		cnt -= 1;
		nfsi->ooo->gap[i] = nfsi->ooo->gap[cnt];
		i = -1;
	}
	if (start != end) {
		if (cnt >= ARRAY_SIZE(nfsi->ooo->gap)) {
			nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
			kfree(nfsi->ooo);
			nfsi->ooo = NULL;
			return;
		}
		nfsi->ooo->gap[cnt].start = start;
		nfsi->ooo->gap[cnt].end = end;
		cnt += 1;
	}
	nfsi->ooo->cnt = cnt;
}
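
/*
 * Worked example of the merge above, with hypothetical change-attribute
 * values: the inode's iversion is 100 when a reply announces change
 * attribute 110 while writes are outstanding, so nfs_update_inode()
 * records the unaccounted span {start=100, end=110}.  A delayed reply for
 * the 100->105 update then arrives and nfs_ooo_record() calls
 * nfs_ooo_merge(nfsi, 105, 100): its end (100) matches the stored start,
 * so the span shrinks to {105, 110}.  Once the 105->110 reply turns up,
 * nfs_ooo_merge(nfsi, 110, 105) collapses the span to start == end and it
 * is dropped: every intermediate change has been seen, so the pagecache
 * need not be invalidated for having missed one.
 */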

static void nfs_ooo_record(struct nfs_inode *nfsi,
			   struct nfs_fattr *fattr)
{
	/* This reply was out-of-order, so record in the
	 * pre/post change id, possibly cancelling
	 * gaps created when iversion was jumped forward.
	 */
	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) &&
	    (fattr->valid & NFS_ATTR_FATTR_PRECHANGE))
		nfs_ooo_merge(nfsi,
			      fattr->change_attr,
			      fattr->pre_change_attr);
}

static int nfs_refresh_inode_locked(struct inode *inode,
				    struct nfs_fattr *fattr)
{
	int attr_cmp = nfs_inode_attrs_cmp(fattr, inode);
	int ret = 0;

	trace_nfs_refresh_inode_enter(inode);

	if (attr_cmp > 0 || nfs_inode_finish_partial_attr_update(fattr, inode))
		ret = nfs_update_inode(inode, fattr);
	else {
		nfs_ooo_record(NFS_I(inode), fattr);

		if (attr_cmp == 0)
			ret = nfs_check_inode_attributes(inode, fattr);
	}

	trace_nfs_refresh_inode_exit(inode, ret);
	return ret;
}

/**
 * nfs_refresh_inode - try to update the inode attribute cache
 * @inode: pointer to inode
 * @fattr: updated attributes
 *
 * Check that an RPC call that returned attributes has not overlapped with
 * other recent updates of the inode metadata, then decide whether it is
 * safe to do a full update of the inode attributes, or whether just to
 * call nfs_check_inode_attributes.
 */
int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
{
	int status;

	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
		return 0;
	spin_lock(&inode->i_lock);
	status = nfs_refresh_inode_locked(inode, fattr);
	spin_unlock(&inode->i_lock);

	return status;
}
EXPORT_SYMBOL_GPL(nfs_refresh_inode);
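
/*
 * Minimal caller sketch, assuming a made-up ->rpc_call_done style handler;
 * only the call pattern is real.  A read-only operation that happened to
 * return attributes simply feeds them back here and lets the comparison
 * logic decide whether a full nfs_update_inode() is safe:
 *
 *	static void example_getattr_done(struct rpc_task *task, void *data)
 *	{
 *		struct example_ctx *ctx = data;	// hypothetical context
 *
 *		if (task->tk_status == 0)
 *			nfs_refresh_inode(ctx->inode, ctx->res.fattr);
 *	}
 *
 * Operations that themselves changed the file should instead use
 * nfs_post_op_update_inode() (or the _force_wcc variant) below.
 */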

static int nfs_post_op_update_inode_locked(struct inode *inode,
		struct nfs_fattr *fattr, unsigned int invalid)
{
	if (S_ISDIR(inode->i_mode))
		invalid |= NFS_INO_INVALID_DATA;
	nfs_set_cache_invalid(inode, invalid);
	if ((fattr->valid & NFS_ATTR_FATTR) == 0)
		return 0;
	return nfs_refresh_inode_locked(inode, fattr);
}

/**
 * nfs_post_op_update_inode - try to update the inode attribute cache
 * @inode: pointer to inode
 * @fattr: updated attributes
 *
 * After an operation that has changed the inode metadata, mark the
 * attribute cache as being invalid, then try to update it.
 *
 * NB: if the server didn't return any post op attributes, this
 * function will force the retrieval of attributes before the next
 * NFS request. Thus it should be used only for operations that
 * are expected to change one or more attributes, to avoid
 * unnecessary NFS requests and trips through nfs_update_inode().
 */
int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
	int status;

	spin_lock(&inode->i_lock);
	nfs_fattr_set_barrier(fattr);
	status = nfs_post_op_update_inode_locked(inode, fattr,
			NFS_INO_INVALID_CHANGE
			| NFS_INO_INVALID_CTIME
			| NFS_INO_REVAL_FORCED);
	spin_unlock(&inode->i_lock);

	return status;
}
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);

/**
 * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
 * @inode: pointer to inode
 * @fattr: updated attributes
 *
 * After an operation that has changed the inode metadata, mark the
 * attribute cache as being invalid, then try to update it. Fake up
 * weak cache consistency data, if none exist.
 *
 * This function is mainly designed to be used by the ->write_done() functions.
 */
int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
{
	int attr_cmp = nfs_inode_attrs_cmp(fattr, inode);
	int status;

	/* Don't do a WCC update if these attributes are already stale */
	if (attr_cmp < 0)
		return 0;
	if ((fattr->valid & NFS_ATTR_FATTR) == 0 || !attr_cmp) {
		/* Record the pre/post change info before clearing PRECHANGE */
		nfs_ooo_record(NFS_I(inode), fattr);
		fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
				| NFS_ATTR_FATTR_PRESIZE
				| NFS_ATTR_FATTR_PREMTIME
				| NFS_ATTR_FATTR_PRECTIME);
		goto out_noforce;
	}
	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
			(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
		fattr->pre_change_attr = inode_peek_iversion_raw(inode);
		fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
	}
	if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
			(fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
		fattr->pre_ctime = inode_get_ctime(inode);
		fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
	}
	if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
			(fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
		fattr->pre_mtime = inode_get_mtime(inode);
		fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
	}
	if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
			(fattr->valid & NFS_ATTR_FATTR_PRESIZE) == 0) {
		fattr->pre_size = i_size_read(inode);
		fattr->valid |= NFS_ATTR_FATTR_PRESIZE;
	}
out_noforce:
	status = nfs_post_op_update_inode_locked(inode, fattr,
			NFS_INO_INVALID_CHANGE
			| NFS_INO_INVALID_CTIME
			| NFS_INO_INVALID_MTIME
			| NFS_INO_INVALID_BLOCKS);
	return status;
}

/**
 * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
 * @inode: pointer to inode
 * @fattr: updated attributes
 *
 * After an operation that has changed the inode metadata, mark the
 * attribute cache as being invalid, then try to update it. Fake up
 * weak cache consistency data, if none exist.
 *
 * This function is mainly designed to be used by the ->write_done() functions.
 */
int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
{
	int status;

	spin_lock(&inode->i_lock);
	nfs_fattr_set_barrier(fattr);
	status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
	return status;
}
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);


/*
 * Many nfs protocol calls return the new file attributes after
 * an operation. Here we update the inode to reflect the state
 * of the server's inode.
 *
 * This is a bit tricky because we have to make sure all dirty pages
 * have been sent off to the server before calling invalidate_inode_pages.
 * To make sure no other process adds more write requests while we try
 * our best to flush them, we make them sleep during the attribute refresh.
 *
 * A very similar scenario holds for the dir cache.
 */
static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t cur_isize, new_isize;
	u64 fattr_supported = server->fattr_valid;
	unsigned long invalid = 0;
	unsigned long now = jiffies;
	unsigned long save_cache_validity;
	bool have_writers = nfs_file_has_buffered_writers(nfsi);
	bool cache_revalidated = true;
	bool attr_changed = false;
	bool have_delegation;

	dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
			__func__, inode->i_sb->s_id, inode->i_ino,
			nfs_display_fhandle_hash(NFS_FH(inode)),
			atomic_read(&inode->i_count), fattr->valid);

	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
		/* Only a mounted-on-fileid? Just exit */
		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
			return 0;
	/* Has the inode gone and changed behind our back? */
	} else if (nfsi->fileid != fattr->fileid) {
		/* Is this perhaps the mounted-on fileid? */
		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
		    nfsi->fileid == fattr->mounted_on_fileid)
			return 0;
		printk(KERN_ERR "NFS: server %s error: fileid changed\n"
			"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
			NFS_SERVER(inode)->nfs_client->cl_hostname,
			inode->i_sb->s_id, (long long)nfsi->fileid,
			(long long)fattr->fileid);
		goto out_err;
	}

	/*
	 * Make sure the inode's type hasn't changed.
	 */
	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && inode_wrong_type(inode, fattr->mode)) {
		/*
		 * Big trouble! The inode has become a different object.
		 */
		printk(KERN_DEBUG "NFS: %s: inode %lu mode changed, %07o to %07o\n",
				__func__, inode->i_ino, inode->i_mode, fattr->mode);
		goto out_err;
	}

	/* Update the fsid? */
	if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
			!nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
			!IS_AUTOMOUNT(inode))
		server->fsid = fattr->fsid;

	/* Save the delegation state before clearing cache_validity */
	have_delegation = nfs_have_delegated_attributes(inode);

	/*
	 * Update the read time so we don't revalidate too often.
	 */
	nfsi->read_cache_jiffies = fattr->time_start;

	/* Fix up any delegated attributes in the struct nfs_fattr */
	nfs_fattr_fixup_delegated(inode, fattr);

	save_cache_validity = nfsi->cache_validity;
	nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
			| NFS_INO_INVALID_ATIME
			| NFS_INO_REVAL_FORCED
			| NFS_INO_INVALID_BLOCKS);

	/* Do atomic weak cache consistency updates */
	nfs_wcc_update_inode(inode, fattr);

	if (pnfs_layoutcommit_outstanding(inode)) {
		nfsi->cache_validity |=
			save_cache_validity &
			(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
			 NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
			 NFS_INO_INVALID_BLOCKS);
		cache_revalidated = false;
	}

	/* More cache consistency checks */
	if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
		if (!have_writers && nfsi->ooo && nfsi->ooo->cnt == 1 &&
		    nfsi->ooo->gap[0].end == inode_peek_iversion_raw(inode)) {
			/* There is one remaining gap that hasn't been
			 * merged into iversion - do that now.
			 */
			inode_set_iversion_raw(inode, nfsi->ooo->gap[0].start);
			kfree(nfsi->ooo);
			nfsi->ooo = NULL;
		}
		if (!inode_eq_iversion_raw(inode, fattr->change_attr)) {
			/* Could it be a race with writeback? */
			if (!(have_writers || have_delegation)) {
				invalid |= NFS_INO_INVALID_DATA
					| NFS_INO_INVALID_ACCESS
					| NFS_INO_INVALID_ACL
					| NFS_INO_INVALID_XATTR;
				/* Force revalidate of all attributes */
				save_cache_validity |= NFS_INO_INVALID_CTIME
					| NFS_INO_INVALID_MTIME
					| NFS_INO_INVALID_SIZE
					| NFS_INO_INVALID_BLOCKS
					| NFS_INO_INVALID_NLINK
					| NFS_INO_INVALID_MODE
					| NFS_INO_INVALID_OTHER;
				if (S_ISDIR(inode->i_mode))
					nfs_force_lookup_revalidate(inode);
				attr_changed = true;
				dprintk("NFS: change_attr change on server for file %s/%ld\n",
						inode->i_sb->s_id,
						inode->i_ino);
			} else if (!have_delegation) {
				nfs_ooo_record(nfsi, fattr);
				nfs_ooo_merge(nfsi, inode_peek_iversion_raw(inode),
					      fattr->change_attr);
			}
			inode_set_iversion_raw(inode, fattr->change_attr);
		}
	} else {
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_CHANGE;
		if (!have_delegation ||
		    (nfsi->cache_validity & NFS_INO_INVALID_CHANGE) != 0)
			cache_revalidated = false;
	}

	if (fattr->valid & NFS_ATTR_FATTR_MTIME)
		inode_set_mtime_to_ts(inode, fattr->mtime);
	else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_MTIME;

	if (fattr->valid & NFS_ATTR_FATTR_CTIME)
		inode_set_ctime_to_ts(inode, fattr->ctime);
	else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_CTIME;

	/* Check if our cached file size is stale */
	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
		new_isize = nfs_size_to_loff_t(fattr->size);
		cur_isize = i_size_read(inode);
		if (new_isize != cur_isize && !have_delegation) {
			/* Do we perhaps have any outstanding writes, or has
			 * the file grown beyond our last write? */
			if (!nfs_have_writebacks(inode) || new_isize > cur_isize) {
				trace_nfs_size_update(inode, new_isize);
				i_size_write(inode, new_isize);
				if (!have_writers)
					invalid |= NFS_INO_INVALID_DATA;
			}
		}
		if (new_isize == 0 &&
		    !(fattr->valid & (NFS_ATTR_FATTR_SPACE_USED |
				      NFS_ATTR_FATTR_BLOCKS_USED))) {
			fattr->du.nfs3.used = 0;
			fattr->valid |= NFS_ATTR_FATTR_SPACE_USED;
		}
	} else
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_SIZE;

	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
		inode_set_atime_to_ts(inode, fattr->atime);
	else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_ATIME;

	if (fattr->valid & NFS_ATTR_FATTR_MODE) {
		if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
			umode_t newmode = inode->i_mode & S_IFMT;
			newmode |= fattr->mode & S_IALLUGO;
			inode->i_mode = newmode;
			invalid |= NFS_INO_INVALID_ACCESS
				| NFS_INO_INVALID_ACL;
		}
	} else if (fattr_supported & NFS_ATTR_FATTR_MODE)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_MODE;

	if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
		if (!uid_eq(inode->i_uid, fattr->uid)) {
			invalid |= NFS_INO_INVALID_ACCESS
				| NFS_INO_INVALID_ACL;
			inode->i_uid = fattr->uid;
		}
	} else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_OTHER;

	if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
		if (!gid_eq(inode->i_gid, fattr->gid)) {
			invalid |= NFS_INO_INVALID_ACCESS
				| NFS_INO_INVALID_ACL;
			inode->i_gid = fattr->gid;
		}
	} else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_OTHER;

	if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
		if (inode->i_nlink != fattr->nlink)
			set_nlink(inode, fattr->nlink);
	} else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_NLINK;

	if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
		/*
		 * report the blocks in 512byte units
		 */
		inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
	} else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_BLOCKS;

	if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
		inode->i_blocks = fattr->du.nfs2.blocks;
	else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED)
		nfsi->cache_validity |=
			save_cache_validity & NFS_INO_INVALID_BLOCKS;

	/* Update attrtimeo value if we're out of the unstable period */
	if (attr_changed) {
		nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
		nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
		nfsi->attrtimeo_timestamp = now;
		/* Set barrier to be more recent than all outstanding updates */
		nfsi->attr_gencount = nfs_inc_attr_generation_counter();
	} else {
		if (cache_revalidated) {
			if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
				nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
				nfsi->attrtimeo <<= 1;
				if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
					nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
			}
			nfsi->attrtimeo_timestamp = now;
		}
		/* Set the barrier to be more recent than this fattr */
		if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
			nfsi->attr_gencount = fattr->gencount;
	}

	/* Don't invalidate the data if we were to blame */
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
			|| S_ISLNK(inode->i_mode)))
		invalid &= ~NFS_INO_INVALID_DATA;
	nfs_set_cache_invalid(inode, invalid);

	return 0;
out_err:
	/*
	 * No need to worry about unhashing the dentry, as the
	 * lookup validation will know that the inode is bad.
	 * (But we fall through to invalidate the caches.)
	 */
	nfs_set_inode_stale_locked(inode);
	return -ESTALE;
}
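
/*
 * Worked example of the attrtimeo back-off above, assuming the default
 * mount options for a regular file (acregmin=3s, acregmax=60s): each time
 * the attributes revalidate cleanly outside the current window the timeout
 * doubles, 3s -> 6s -> 12s -> 24s -> 48s, and is then clamped to the 60s
 * maximum.  Any detected change resets it to NFS_MINATTRTIMEO.
 */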

struct inode *nfs_alloc_inode(struct super_block *sb)
{
	struct nfs_inode *nfsi;
	nfsi = alloc_inode_sb(sb, nfs_inode_cachep, GFP_KERNEL);
	if (!nfsi)
		return NULL;
	nfsi->flags = 0UL;
	nfsi->cache_validity = 0UL;
	nfsi->ooo = NULL;
#if IS_ENABLED(CONFIG_NFS_V4)
	nfsi->nfs4_acl = NULL;
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_2
	nfsi->xattr_cache = NULL;
#endif
	nfs_netfs_inode_init(nfsi);

	return &nfsi->vfs_inode;
}
EXPORT_SYMBOL_GPL(nfs_alloc_inode);

void nfs_free_inode(struct inode *inode)
{
	kfree(NFS_I(inode)->ooo);
	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
}
EXPORT_SYMBOL_GPL(nfs_free_inode);

static inline void nfs4_init_once(struct nfs_inode *nfsi)
{
#if IS_ENABLED(CONFIG_NFS_V4)
	INIT_LIST_HEAD(&nfsi->open_states);
	nfsi->delegation = NULL;
	init_rwsem(&nfsi->rwsem);
	nfsi->layout = NULL;
#endif
}

static void init_once(void *foo)
{
	struct nfs_inode *nfsi = foo;

	inode_init_once(&nfsi->vfs_inode);
	INIT_LIST_HEAD(&nfsi->open_files);
	INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
	INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
	nfs4_init_once(nfsi);
}

static int __init nfs_init_inodecache(void)
{
	nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
					     sizeof(struct nfs_inode),
					     0, (SLAB_RECLAIM_ACCOUNT|
						 SLAB_ACCOUNT),
					     init_once);
	if (nfs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void nfs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(nfs_inode_cachep);
}

struct workqueue_struct *nfslocaliod_workqueue;
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);

/*
 * Destroy the nfsiod workqueues
 */
static void nfsiod_stop(void)
{
	struct workqueue_struct *wq;

	wq = nfsiod_workqueue;
	if (wq != NULL) {
		nfsiod_workqueue = NULL;
		destroy_workqueue(wq);
	}
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	wq = nfslocaliod_workqueue;
	if (wq != NULL) {
		nfslocaliod_workqueue = NULL;
		destroy_workqueue(wq);
	}
#endif /* CONFIG_NFS_LOCALIO */
}

/*
 * Start the nfsiod workqueues
 */
static int nfsiod_start(void)
{
	dprintk("RPC: creating workqueue nfsiod\n");
	nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (nfsiod_workqueue == NULL)
		return -ENOMEM;
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	/*
	 * localio writes need to use a normal (non-memreclaim) workqueue.
	 * When we start getting low on space, XFS goes and calls flush_work() on
	 * a non-memreclaim work queue, which causes a priority inversion problem.
	 */
	dprintk("RPC: creating workqueue nfslocaliod\n");
	nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
	if (unlikely(nfslocaliod_workqueue == NULL)) {
		nfsiod_stop();
		return -ENOMEM;
	}
#endif /* CONFIG_NFS_LOCALIO */
	return 0;
}

unsigned int nfs_net_id;
EXPORT_SYMBOL_GPL(nfs_net_id);

static int nfs_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	int err;

	nfs_clients_init(net);

	if (!rpc_proc_register(net, &nn->rpcstats)) {
		err = -ENOMEM;
		goto err_proc_rpc;
	}

	err = nfs_fs_proc_net_init(net);
	if (err)
		goto err_proc_nfs;

	return 0;

err_proc_nfs:
	rpc_proc_unregister(net, "nfs");
err_proc_rpc:
	nfs_clients_exit(net);
	return err;
}

static void nfs_net_exit(struct net *net)
{
	rpc_proc_unregister(net, "nfs");
	nfs_fs_proc_net_exit(net);
	nfs_clients_exit(net);
}

static struct pernet_operations nfs_net_ops = {
	.init = nfs_net_init,
	.exit = nfs_net_exit,
	.id = &nfs_net_id,
	.size = sizeof(struct nfs_net),
};

/*
 * Initialize NFS
 */
static int __init init_nfs_fs(void)
{
	int err;

	err = nfs_sysfs_init();
	if (err < 0)
		goto out10;

	err = register_pernet_subsys(&nfs_net_ops);
	if (err < 0)
		goto out9;

	err = nfsiod_start();
	if (err)
		goto out7;

	err = nfs_fs_proc_init();
	if (err)
		goto out6;

	err = nfs_init_nfspagecache();
	if (err)
		goto out5;

	err = nfs_init_inodecache();
	if (err)
		goto out4;

	err = nfs_init_readpagecache();
	if (err)
		goto out3;

	err = nfs_init_writepagecache();
	if (err)
		goto out2;

	err = nfs_init_directcache();
	if (err)
		goto out1;

	err = register_nfs_fs();
	if (err)
		goto out0;

	return 0;
out0:
	nfs_destroy_directcache();
out1:
	nfs_destroy_writepagecache();
out2:
	nfs_destroy_readpagecache();
out3:
	nfs_destroy_inodecache();
out4:
	nfs_destroy_nfspagecache();
out5:
	nfs_fs_proc_exit();
out6:
	nfsiod_stop();
out7:
	unregister_pernet_subsys(&nfs_net_ops);
out9:
	nfs_sysfs_exit();
out10:
	return err;
}

static void __exit exit_nfs_fs(void)
{
	nfs_destroy_directcache();
	nfs_destroy_writepagecache();
	nfs_destroy_readpagecache();
	nfs_destroy_inodecache();
	nfs_destroy_nfspagecache();
	unregister_pernet_subsys(&nfs_net_ops);
	unregister_nfs_fs();
	nfs_fs_proc_exit();
	nfsiod_stop();
	nfs_sysfs_exit();
}

/* Not quite true; I just maintain it */
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_DESCRIPTION("NFS client support");
MODULE_LICENSE("GPL");
module_param(enable_ino64, bool, 0644);

module_init(init_nfs_fs)
module_exit(exit_nfs_fs)