1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/fs/nfs/inode.c
4 *
5 * Copyright (C) 1992 Rick Sladkey
6 *
7 * nfs inode and superblock handling functions
8 *
9 * Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
10 * experimental NFS changes. Modularisation taken straight from SYS5 fs.
11 *
12 * Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
13 * J.S.Peatfield@damtp.cam.ac.uk
14 *
15 */
16
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/sched/signal.h>
20 #include <linux/time.h>
21 #include <linux/kernel.h>
22 #include <linux/mm.h>
23 #include <linux/string.h>
24 #include <linux/stat.h>
25 #include <linux/errno.h>
26 #include <linux/unistd.h>
27 #include <linux/sunrpc/clnt.h>
28 #include <linux/sunrpc/stats.h>
29 #include <linux/sunrpc/metrics.h>
30 #include <linux/nfs_fs.h>
31 #include <linux/nfs_mount.h>
32 #include <linux/nfs4_mount.h>
33 #include <linux/lockd/bind.h>
34 #include <linux/seq_file.h>
35 #include <linux/mount.h>
36 #include <linux/vfs.h>
37 #include <linux/inet.h>
38 #include <linux/nfs_xdr.h>
39 #include <linux/slab.h>
40 #include <linux/compat.h>
41 #include <linux/freezer.h>
42 #include <linux/uaccess.h>
43 #include <linux/iversion.h>
44
45 #include "nfs4_fs.h"
46 #include "callback.h"
47 #include "delegation.h"
48 #include "iostat.h"
49 #include "internal.h"
50 #include "fscache.h"
51 #include "pnfs.h"
52 #include "nfs.h"
53 #include "netns.h"
54 #include "sysfs.h"
55
56 #include "nfstrace.h"
57
58 #define NFSDBG_FACILITY NFSDBG_VFS
59
60 #define NFS_64_BIT_INODE_NUMBERS_ENABLED 1
61
62 /* Default is to see 64-bit inode numbers */
63 static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
64
65 static int nfs_update_inode(struct inode *, struct nfs_fattr *);
66
67 static struct kmem_cache * nfs_inode_cachep;
68
69 static inline unsigned long
70 nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
71 {
72 return nfs_fileid_to_ino_t(fattr->fileid);
73 }
74
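/*
 * Sleep callback for wait_on_bit_action(): give up the CPU, and allow the
 * wait to be interrupted by a fatal signal when @mode permits it.
 */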
75 int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
76 {
77 schedule();
78 if (signal_pending_state(mode, current))
79 return -ERESTARTSYS;
80 return 0;
81 }
82 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
83
84 /**
85 * nfs_compat_user_ino64 - returns the user-visible inode number
86 * @fileid: 64-bit fileid
87 *
88 * This function returns a 32-bit inode number if the boot parameter
89 * nfs.enable_ino64 is zero.
90 */
91 u64 nfs_compat_user_ino64(u64 fileid)
92 {
93 #ifdef CONFIG_COMPAT
94 compat_ulong_t ino;
95 #else
96 unsigned long ino;
97 #endif
98
99 if (enable_ino64)
100 return fileid;
101 ino = fileid;
102 if (sizeof(ino) < sizeof(fileid))
103 ino ^= fileid >> (sizeof(fileid)-sizeof(ino)) * 8;
104 return ino;
105 }
106
107 int nfs_drop_inode(struct inode *inode)
108 {
109 return NFS_STALE(inode) || generic_drop_inode(inode);
110 }
111 EXPORT_SYMBOL_GPL(nfs_drop_inode);
112
113 void nfs_clear_inode(struct inode *inode)
114 {
115 /*
116 * The following should never happen...
117 */
118 WARN_ON_ONCE(nfs_have_writebacks(inode));
119 WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
120 nfs_zap_acl_cache(inode);
121 nfs_access_zap_cache(inode);
122 nfs_fscache_clear_inode(inode);
123 }
124 EXPORT_SYMBOL_GPL(nfs_clear_inode);
125
126 void nfs_evict_inode(struct inode *inode)
127 {
128 truncate_inode_pages_final(&inode->i_data);
129 clear_inode(inode);
130 nfs_clear_inode(inode);
131 }
132
133 int nfs_sync_inode(struct inode *inode)
134 {
135 inode_dio_wait(inode);
136 return nfs_wb_all(inode);
137 }
138 EXPORT_SYMBOL_GPL(nfs_sync_inode);
139
140 /**
141 * nfs_sync_mapping - helper to flush all mmapped dirty data to disk
142 * @mapping: pointer to struct address_space
143 */
144 int nfs_sync_mapping(struct address_space *mapping)
145 {
146 int ret = 0;
147
148 if (mapping->nrpages != 0) {
149 unmap_mapping_range(mapping, 0, 0, 0);
150 ret = nfs_wb_all(mapping->host);
151 }
152 return ret;
153 }
154
155 static int nfs_attribute_timeout(struct inode *inode)
156 {
157 struct nfs_inode *nfsi = NFS_I(inode);
158
159 return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
160 }
161
162 static bool nfs_check_cache_flags_invalid(struct inode *inode,
163 unsigned long flags)
164 {
165 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
166
167 return (cache_validity & flags) != 0;
168 }
169
170 bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags)
171 {
172 if (nfs_check_cache_flags_invalid(inode, flags))
173 return true;
174 return nfs_attribute_cache_expired(inode);
175 }
176 EXPORT_SYMBOL_GPL(nfs_check_cache_invalid);
177
178 #ifdef CONFIG_NFS_V4_2
179 static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
180 {
181 return nfsi->xattr_cache != NULL;
182 }
183 #else
184 static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
185 {
186 return false;
187 }
188 #endif
189
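/*
 * Mark cached attributes and/or data as needing revalidation. Flags that
 * are protected by an outstanding delegation are filtered out, and
 * NFS_INO_INVALID_DATA is dropped when the mapping holds no cached pages.
 */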
190 void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
191 {
192 struct nfs_inode *nfsi = NFS_I(inode);
193
194 if (nfs_have_delegated_attributes(inode)) {
195 if (!(flags & NFS_INO_REVAL_FORCED))
196 flags &= ~(NFS_INO_INVALID_MODE |
197 NFS_INO_INVALID_OTHER |
198 NFS_INO_INVALID_XATTR);
199 flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
200 }
201
202 if (!nfs_has_xattr_cache(nfsi))
203 flags &= ~NFS_INO_INVALID_XATTR;
204 if (flags & NFS_INO_INVALID_DATA)
205 nfs_fscache_invalidate(inode, 0);
206 flags &= ~NFS_INO_REVAL_FORCED;
207
208 flags |= nfsi->cache_validity;
209 if (inode->i_mapping->nrpages == 0)
210 flags &= ~NFS_INO_INVALID_DATA;
211
212 /* pairs with nfs_clear_invalid_mapping()'s smp_load_acquire() */
213 smp_store_release(&nfsi->cache_validity, flags);
214
215 if (inode->i_mapping->nrpages == 0 ||
216 nfsi->cache_validity & NFS_INO_INVALID_DATA) {
217 nfs_ooo_clear(nfsi);
218 }
219 trace_nfs_set_cache_invalid(inode, 0);
220 }
221 EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
222
223 /*
224 * Invalidate the local caches
225 */
226 static void nfs_zap_caches_locked(struct inode *inode)
227 {
228 struct nfs_inode *nfsi = NFS_I(inode);
229 int mode = inode->i_mode;
230
231 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
232
233 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
234 nfsi->attrtimeo_timestamp = jiffies;
235
236 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
237 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
238 NFS_INO_INVALID_DATA |
239 NFS_INO_INVALID_ACCESS |
240 NFS_INO_INVALID_ACL |
241 NFS_INO_INVALID_XATTR);
242 else
243 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR |
244 NFS_INO_INVALID_ACCESS |
245 NFS_INO_INVALID_ACL |
246 NFS_INO_INVALID_XATTR);
247 nfs_zap_label_cache_locked(nfsi);
248 }
249
250 void nfs_zap_caches(struct inode *inode)
251 {
252 spin_lock(&inode->i_lock);
253 nfs_zap_caches_locked(inode);
254 spin_unlock(&inode->i_lock);
255 }
256
257 void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
258 {
259 if (mapping->nrpages != 0) {
260 spin_lock(&inode->i_lock);
261 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
262 spin_unlock(&inode->i_lock);
263 }
264 }
265
266 void nfs_zap_acl_cache(struct inode *inode)
267 {
268 void (*clear_acl_cache)(struct inode *);
269
270 clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache;
271 if (clear_acl_cache != NULL)
272 clear_acl_cache(inode);
273 spin_lock(&inode->i_lock);
274 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
275 spin_unlock(&inode->i_lock);
276 }
277 EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
278
279 void nfs_invalidate_atime(struct inode *inode)
280 {
281 if (nfs_have_delegated_atime(inode))
282 return;
283 spin_lock(&inode->i_lock);
284 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
285 spin_unlock(&inode->i_lock);
286 }
287 EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
288
289 /*
290 * Invalidate, but do not unhash, the inode.
291 * NB: must be called with inode->i_lock held!
292 */
293 static void nfs_set_inode_stale_locked(struct inode *inode)
294 {
295 set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
296 nfs_zap_caches_locked(inode);
297 trace_nfs_set_inode_stale(inode);
298 }
299
300 void nfs_set_inode_stale(struct inode *inode)
301 {
302 spin_lock(&inode->i_lock);
303 nfs_set_inode_stale_locked(inode);
304 spin_unlock(&inode->i_lock);
305 }
306
307 struct nfs_find_desc {
308 struct nfs_fh *fh;
309 struct nfs_fattr *fattr;
310 };
311
312 /*
313 * In NFSv3 we can have 64bit inode numbers. In order to support
314 * this, and re-exported directories (also seen in NFSv2)
315 * we are forced to allow 2 different inodes to have the same
316 * i_ino.
317 */
318 static int
319 nfs_find_actor(struct inode *inode, void *opaque)
320 {
321 struct nfs_find_desc *desc = opaque;
322 struct nfs_fh *fh = desc->fh;
323 struct nfs_fattr *fattr = desc->fattr;
324
325 if (NFS_FILEID(inode) != fattr->fileid)
326 return 0;
327 if (inode_wrong_type(inode, fattr->mode))
328 return 0;
329 if (nfs_compare_fh(NFS_FH(inode), fh))
330 return 0;
331 if (is_bad_inode(inode) || NFS_STALE(inode))
332 return 0;
333 return 1;
334 }
335
336 static int
337 nfs_init_locked(struct inode *inode, void *opaque)
338 {
339 struct nfs_find_desc *desc = opaque;
340 struct nfs_fattr *fattr = desc->fattr;
341
342 set_nfs_fileid(inode, fattr->fileid);
343 inode->i_mode = fattr->mode;
344 nfs_copy_fh(NFS_FH(inode), desc->fh);
345 return 0;
346 }
347
348 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
349 static void nfs_clear_label_invalid(struct inode *inode)
350 {
351 spin_lock(&inode->i_lock);
352 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
353 spin_unlock(&inode->i_lock);
354 }
355
356 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr)
357 {
358 int error;
359
360 if (fattr->label == NULL)
361 return;
362
363 if ((fattr->valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL) && inode->i_security) {
364 error = security_inode_notifysecctx(inode, fattr->label->label,
365 fattr->label->len);
366 if (error)
367 printk(KERN_ERR "%s() %s %d "
368 "security_inode_notifysecctx() %d\n",
369 __func__,
370 (char *)fattr->label->label,
371 fattr->label->len, error);
372 nfs_clear_label_invalid(inode);
373 }
374 }
375
376 struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
377 {
378 struct nfs4_label *label;
379
380 if (!(server->caps & NFS_CAP_SECURITY_LABEL))
381 return NULL;
382
383 label = kzalloc(sizeof(struct nfs4_label), flags);
384 if (label == NULL)
385 return ERR_PTR(-ENOMEM);
386
387 label->label = kzalloc(NFS4_MAXLABELLEN, flags);
388 if (label->label == NULL) {
389 kfree(label);
390 return ERR_PTR(-ENOMEM);
391 }
392 label->len = NFS4_MAXLABELLEN;
393
394 return label;
395 }
396 EXPORT_SYMBOL_GPL(nfs4_label_alloc);
397 #else
398 void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr)
399 {
400 }
401 #endif
402 EXPORT_SYMBOL_GPL(nfs_setsecurity);
403
404 /* Search for inode identified by fh, fileid and i_mode in inode cache. */
405 struct inode *
406 nfs_ilookup(struct super_block *sb, struct nfs_fattr *fattr, struct nfs_fh *fh)
407 {
408 struct nfs_find_desc desc = {
409 .fh = fh,
410 .fattr = fattr,
411 };
412 struct inode *inode;
413 unsigned long hash;
414
415 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID) ||
416 !(fattr->valid & NFS_ATTR_FATTR_TYPE))
417 return NULL;
418
419 hash = nfs_fattr_to_ino_t(fattr);
420 inode = ilookup5(sb, hash, nfs_find_actor, &desc);
421
422 dprintk("%s: returning %p\n", __func__, inode);
423 return inode;
424 }
425
426 static void nfs_inode_init_regular(struct nfs_inode *nfsi)
427 {
428 atomic_long_set(&nfsi->nrequests, 0);
429 atomic_long_set(&nfsi->redirtied_pages, 0);
430 INIT_LIST_HEAD(&nfsi->commit_info.list);
431 atomic_long_set(&nfsi->commit_info.ncommit, 0);
432 atomic_set(&nfsi->commit_info.rpcs_out, 0);
433 mutex_init(&nfsi->commit_mutex);
434 }
435
436 static void nfs_inode_init_dir(struct nfs_inode *nfsi)
437 {
438 nfsi->cache_change_attribute = 0;
439 memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
440 init_rwsem(&nfsi->rmdir_sem);
441 }
442
443 /*
444 * This is our front-end to iget that looks up inodes by file handle
445 * instead of inode number.
446 */
447 struct inode *
448 nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
449 {
450 struct nfs_find_desc desc = {
451 .fh = fh,
452 .fattr = fattr
453 };
454 struct inode *inode = ERR_PTR(-ENOENT);
455 u64 fattr_supported = NFS_SB(sb)->fattr_valid;
456 unsigned long hash;
457
458 nfs_attr_check_mountpoint(sb, fattr);
459
460 if (nfs_attr_use_mounted_on_fileid(fattr))
461 fattr->fileid = fattr->mounted_on_fileid;
462 else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
463 goto out_no_inode;
464 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
465 goto out_no_inode;
466
467 hash = nfs_fattr_to_ino_t(fattr);
468
469 inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc);
470 if (inode == NULL) {
471 inode = ERR_PTR(-ENOMEM);
472 goto out_no_inode;
473 }
474
475 if (inode->i_state & I_NEW) {
476 struct nfs_inode *nfsi = NFS_I(inode);
477 unsigned long now = jiffies;
478
479 /* We set i_ino for the few things that still rely on it,
480 * such as stat(2) */
481 inode->i_ino = hash;
482
483 /* We can't support update_atime(), since the server will reset it */
484 inode->i_flags |= S_NOATIME|S_NOCMTIME;
485 inode->i_mode = fattr->mode;
486 nfsi->cache_validity = 0;
487 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
488 && (fattr_supported & NFS_ATTR_FATTR_MODE))
489 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
490 /* Why so? Because we want revalidate for devices/FIFOs, and
491 * that's precisely what we have in nfs_file_inode_operations.
492 */
493 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
494 if (S_ISREG(inode->i_mode)) {
495 inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
496 inode->i_data.a_ops = &nfs_file_aops;
497 nfs_inode_init_regular(nfsi);
498 mapping_set_large_folios(inode->i_mapping);
499 } else if (S_ISDIR(inode->i_mode)) {
500 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
501 inode->i_fop = &nfs_dir_operations;
502 inode->i_data.a_ops = &nfs_dir_aops;
503 nfs_inode_init_dir(nfsi);
504 /* Deal with crossing mountpoints */
505 if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
506 fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
507 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
508 inode->i_op = &nfs_referral_inode_operations;
509 else
510 inode->i_op = &nfs_mountpoint_inode_operations;
511 inode->i_fop = NULL;
512 inode->i_flags |= S_AUTOMOUNT;
513 }
514 } else if (S_ISLNK(inode->i_mode)) {
515 inode->i_op = &nfs_symlink_inode_operations;
516 inode_nohighmem(inode);
517 } else
518 init_special_inode(inode, inode->i_mode, fattr->rdev);
519
520 inode_set_atime(inode, 0, 0);
521 inode_set_mtime(inode, 0, 0);
522 inode_set_ctime(inode, 0, 0);
523 inode_set_iversion_raw(inode, 0);
524 inode->i_size = 0;
525 clear_nlink(inode);
526 inode->i_uid = make_kuid(&init_user_ns, -2);
527 inode->i_gid = make_kgid(&init_user_ns, -2);
528 inode->i_blocks = 0;
529 nfsi->write_io = 0;
530 nfsi->read_io = 0;
531
532 nfsi->read_cache_jiffies = fattr->time_start;
533 nfsi->attr_gencount = fattr->gencount;
534 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
535 inode_set_atime_to_ts(inode, fattr->atime);
536 else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
537 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
538 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
539 inode_set_mtime_to_ts(inode, fattr->mtime);
540 else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
541 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
542 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
543 inode_set_ctime_to_ts(inode, fattr->ctime);
544 else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
545 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CTIME);
546 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
547 inode_set_iversion_raw(inode, fattr->change_attr);
548 else
549 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE);
550 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
551 inode->i_size = nfs_size_to_loff_t(fattr->size);
552 else
553 nfs_set_cache_invalid(inode, NFS_INO_INVALID_SIZE);
554 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
555 set_nlink(inode, fattr->nlink);
556 else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
557 nfs_set_cache_invalid(inode, NFS_INO_INVALID_NLINK);
558 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
559 inode->i_uid = fattr->uid;
560 else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
561 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
562 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
563 inode->i_gid = fattr->gid;
564 else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
565 nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
566 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
567 inode->i_blocks = fattr->du.nfs2.blocks;
568 else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED &&
569 fattr->size != 0)
570 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
571 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
572 /*
573 * report the blocks in 512-byte units
574 */
575 inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
576 } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED &&
577 fattr->size != 0)
578 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
579
580 nfs_setsecurity(inode, fattr);
581
582 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
583 nfsi->attrtimeo_timestamp = now;
584 nfsi->access_cache = RB_ROOT;
585
586 nfs_fscache_init_inode(inode);
587
588 unlock_new_inode(inode);
589 } else {
590 int err = nfs_refresh_inode(inode, fattr);
591 if (err < 0) {
592 iput(inode);
593 inode = ERR_PTR(err);
594 goto out_no_inode;
595 }
596 }
597 dprintk("NFS: nfs_fhget(%s/%Lu fh_crc=0x%08x ct=%d)\n",
598 inode->i_sb->s_id,
599 (unsigned long long)NFS_FILEID(inode),
600 nfs_display_fhandle_hash(fh),
601 atomic_read(&inode->i_count));
602
603 out:
604 return inode;
605
606 out_no_inode:
607 dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
608 goto out;
609 }
610 EXPORT_SYMBOL_GPL(nfs_fhget);
611
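/*
 * Don't let attributes returned by the server overwrite timestamps that
 * are currently being managed locally under a delegation, unless the
 * matching cache_validity flag says the cached value is already stale.
 */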
612 static void
613 nfs_fattr_fixup_delegated(struct inode *inode, struct nfs_fattr *fattr)
614 {
615 unsigned long cache_validity = NFS_I(inode)->cache_validity;
616
617 if (nfs_have_delegated_mtime(inode)) {
618 if (!(cache_validity & NFS_INO_INVALID_CTIME))
619 fattr->valid &= ~(NFS_ATTR_FATTR_PRECTIME |
620 NFS_ATTR_FATTR_CTIME);
621
622 if (!(cache_validity & NFS_INO_INVALID_MTIME))
623 fattr->valid &= ~(NFS_ATTR_FATTR_PREMTIME |
624 NFS_ATTR_FATTR_MTIME);
625
626 if (!(cache_validity & NFS_INO_INVALID_ATIME))
627 fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
628 } else if (nfs_have_delegated_atime(inode)) {
629 if (!(cache_validity & NFS_INO_INVALID_ATIME))
630 fattr->valid &= ~NFS_ATTR_FATTR_ATIME;
631 }
632 }
633
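/*
 * Update the inode's timestamps locally and clear the corresponding
 * cache_validity bits so that the new values are not revalidated away.
 */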
634 static void nfs_update_timestamps(struct inode *inode, unsigned int ia_valid)
635 {
636 enum file_time_flags time_flags = 0;
637 unsigned int cache_flags = 0;
638
639 if (ia_valid & ATTR_MTIME) {
640 time_flags |= S_MTIME | S_CTIME;
641 cache_flags |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
642 }
643 if (ia_valid & ATTR_ATIME) {
644 time_flags |= S_ATIME;
645 cache_flags |= NFS_INO_INVALID_ATIME;
646 }
647 inode_update_timestamps(inode, time_flags);
648 NFS_I(inode)->cache_validity &= ~cache_flags;
649 }
650
651 void nfs_update_delegated_atime(struct inode *inode)
652 {
653 spin_lock(&inode->i_lock);
654 if (nfs_have_delegated_atime(inode))
655 nfs_update_timestamps(inode, ATTR_ATIME);
656 spin_unlock(&inode->i_lock);
657 }
658
659 void nfs_update_delegated_mtime_locked(struct inode *inode)
660 {
661 if (nfs_have_delegated_mtime(inode))
662 nfs_update_timestamps(inode, ATTR_MTIME);
663 }
664
665 void nfs_update_delegated_mtime(struct inode *inode)
666 {
667 spin_lock(&inode->i_lock);
668 nfs_update_delegated_mtime_locked(inode);
669 spin_unlock(&inode->i_lock);
670 }
671 EXPORT_SYMBOL_GPL(nfs_update_delegated_mtime);
672
673 #define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
674
675 int
676 nfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
677 struct iattr *attr)
678 {
679 struct inode *inode = d_inode(dentry);
680 struct nfs_fattr *fattr;
681 int error = 0;
682
683 nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
684
685 /* skip mode change if it's just for clearing setuid/setgid */
686 if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
687 attr->ia_valid &= ~ATTR_MODE;
688
689 if (attr->ia_valid & ATTR_SIZE) {
690 BUG_ON(!S_ISREG(inode->i_mode));
691
692 error = inode_newsize_ok(inode, attr->ia_size);
693 if (error)
694 return error;
695
696 if (attr->ia_size == i_size_read(inode))
697 attr->ia_valid &= ~ATTR_SIZE;
698 }
699
700 if (nfs_have_delegated_mtime(inode) && attr->ia_valid & ATTR_MTIME) {
701 spin_lock(&inode->i_lock);
702 nfs_update_timestamps(inode, attr->ia_valid);
703 spin_unlock(&inode->i_lock);
704 attr->ia_valid &= ~(ATTR_MTIME | ATTR_ATIME);
705 } else if (nfs_have_delegated_atime(inode) &&
706 attr->ia_valid & ATTR_ATIME &&
707 !(attr->ia_valid & ATTR_MTIME)) {
708 nfs_update_delegated_atime(inode);
709 attr->ia_valid &= ~ATTR_ATIME;
710 }
711
712 /* Optimization: if the end result is no change, don't RPC */
713 if (((attr->ia_valid & NFS_VALID_ATTRS) & ~(ATTR_FILE|ATTR_OPEN)) == 0)
714 return 0;
715
716 trace_nfs_setattr_enter(inode);
717
718 /* Write all dirty data */
719 if (S_ISREG(inode->i_mode))
720 nfs_sync_inode(inode);
721
722 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
723 if (fattr == NULL) {
724 error = -ENOMEM;
725 goto out;
726 }
727
728 error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
729 if (error == 0)
730 error = nfs_refresh_inode(inode, fattr);
731 nfs_free_fattr(fattr);
732 out:
733 trace_nfs_setattr_exit(inode, error);
734 return error;
735 }
736 EXPORT_SYMBOL_GPL(nfs_setattr);
737
738 /**
739 * nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
740 * @inode: inode of the file used
741 * @offset: file offset to start truncating
742 *
743 * This is a copy of the common vmtruncate, but with the locking
744 * corrected to take into account the fact that NFS requires
745 * inode->i_size to be updated under the inode->i_lock.
746 * Note: must be called with inode->i_lock held!
747 */
748 static int nfs_vmtruncate(struct inode * inode, loff_t offset)
749 {
750 int err;
751
752 err = inode_newsize_ok(inode, offset);
753 if (err)
754 goto out;
755
756 trace_nfs_size_truncate(inode, offset);
757 i_size_write(inode, offset);
758 /* Optimisation */
759 if (offset == 0) {
760 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
761 nfs_ooo_clear(NFS_I(inode));
762 }
763 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
764
765 spin_unlock(&inode->i_lock);
766 truncate_pagecache(inode, offset);
767 nfs_update_delegated_mtime_locked(inode);
768 spin_lock(&inode->i_lock);
769 out:
770 return err;
771 }
772
773 /**
774 * nfs_setattr_update_inode - Update inode metadata after a setattr call.
775 * @inode: pointer to struct inode
776 * @attr: pointer to struct iattr
777 * @fattr: pointer to struct nfs_fattr
778 *
779 * Note: we do this in the *proc.c in order to ensure that
780 * it works for things like exclusive creates too.
781 */
782 void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
783 struct nfs_fattr *fattr)
784 {
785 /* Barrier: bump the attribute generation count. */
786 nfs_fattr_set_barrier(fattr);
787
788 spin_lock(&inode->i_lock);
789 NFS_I(inode)->attr_gencount = fattr->gencount;
790 if ((attr->ia_valid & ATTR_SIZE) != 0) {
791 if (!nfs_have_delegated_mtime(inode))
792 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
793 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
794 nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
795 nfs_vmtruncate(inode, attr->ia_size);
796 }
797 if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
798 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_CTIME;
799 if ((attr->ia_valid & ATTR_KILL_SUID) != 0 &&
800 inode->i_mode & S_ISUID)
801 inode->i_mode &= ~S_ISUID;
802 if (setattr_should_drop_sgid(&nop_mnt_idmap, inode))
803 inode->i_mode &= ~S_ISGID;
804 if ((attr->ia_valid & ATTR_MODE) != 0) {
805 int mode = attr->ia_mode & S_IALLUGO;
806 mode |= inode->i_mode & ~S_IALLUGO;
807 inode->i_mode = mode;
808 }
809 if ((attr->ia_valid & ATTR_UID) != 0)
810 inode->i_uid = attr->ia_uid;
811 if ((attr->ia_valid & ATTR_GID) != 0)
812 inode->i_gid = attr->ia_gid;
813 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
814 inode_set_ctime_to_ts(inode, fattr->ctime);
815 else
816 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
817 | NFS_INO_INVALID_CTIME);
818 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
819 | NFS_INO_INVALID_ACL);
820 }
821 if (attr->ia_valid & (ATTR_ATIME_SET|ATTR_ATIME)) {
822 NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_ATIME
823 | NFS_INO_INVALID_CTIME);
824 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
825 inode_set_atime_to_ts(inode, fattr->atime);
826 else if (attr->ia_valid & ATTR_ATIME_SET)
827 inode_set_atime_to_ts(inode, attr->ia_atime);
828 else
829 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
830
831 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
832 inode_set_ctime_to_ts(inode, fattr->ctime);
833 else
834 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
835 | NFS_INO_INVALID_CTIME);
836 }
837 if (attr->ia_valid & (ATTR_MTIME_SET|ATTR_MTIME)) {
838 NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_MTIME
839 | NFS_INO_INVALID_CTIME);
840 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
841 inode_set_mtime_to_ts(inode, fattr->mtime);
842 else if (attr->ia_valid & ATTR_MTIME_SET)
843 inode_set_mtime_to_ts(inode, attr->ia_mtime);
844 else
845 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MTIME);
846
847 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
848 inode_set_ctime_to_ts(inode, fattr->ctime);
849 else
850 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE
851 | NFS_INO_INVALID_CTIME);
852 }
853 if (fattr->valid)
854 nfs_update_inode(inode, fattr);
855 spin_unlock(&inode->i_lock);
856 }
857 EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
858
859 /*
860 * Don't request help from readdirplus if the file is being written to,
861 * or if attribute caching is turned off
862 */
863 static bool nfs_getattr_readdirplus_enable(const struct inode *inode)
864 {
865 return nfs_server_capable(inode, NFS_CAP_READDIRPLUS) &&
866 !nfs_have_writebacks(inode) && NFS_MAXATTRTIMEO(inode) > 5 * HZ;
867 }
868
869 static void nfs_readdirplus_parent_cache_miss(struct dentry *dentry)
870 {
871 if (!IS_ROOT(dentry)) {
872 struct dentry *parent = dget_parent(dentry);
873 nfs_readdir_record_entry_cache_miss(d_inode(parent));
874 dput(parent);
875 }
876 }
877
878 static void nfs_readdirplus_parent_cache_hit(struct dentry *dentry)
879 {
880 if (!IS_ROOT(dentry)) {
881 struct dentry *parent = dget_parent(dentry);
882 nfs_readdir_record_entry_cache_hit(d_inode(parent));
883 dput(parent);
884 }
885 }
886
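/*
 * Work out which statx fields can be served from the attribute cache
 * without a round trip to the server.
 */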
887 static u32 nfs_get_valid_attrmask(struct inode *inode)
888 {
889 unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
890 u32 reply_mask = STATX_INO | STATX_TYPE;
891
892 if (!(cache_validity & NFS_INO_INVALID_ATIME))
893 reply_mask |= STATX_ATIME;
894 if (!(cache_validity & NFS_INO_INVALID_CTIME))
895 reply_mask |= STATX_CTIME;
896 if (!(cache_validity & NFS_INO_INVALID_MTIME))
897 reply_mask |= STATX_MTIME;
898 if (!(cache_validity & NFS_INO_INVALID_SIZE))
899 reply_mask |= STATX_SIZE;
900 if (!(cache_validity & NFS_INO_INVALID_NLINK))
901 reply_mask |= STATX_NLINK;
902 if (!(cache_validity & NFS_INO_INVALID_MODE))
903 reply_mask |= STATX_MODE;
904 if (!(cache_validity & NFS_INO_INVALID_OTHER))
905 reply_mask |= STATX_UID | STATX_GID;
906 if (!(cache_validity & NFS_INO_INVALID_BLOCKS))
907 reply_mask |= STATX_BLOCKS;
908 if (!(cache_validity & NFS_INO_INVALID_CHANGE))
909 reply_mask |= STATX_CHANGE_COOKIE;
910 return reply_mask;
911 }
912
913 int nfs_getattr(struct mnt_idmap *idmap, const struct path *path,
914 struct kstat *stat, u32 request_mask, unsigned int query_flags)
915 {
916 struct inode *inode = d_inode(path->dentry);
917 struct nfs_server *server = NFS_SERVER(inode);
918 unsigned long cache_validity;
919 int err = 0;
920 bool force_sync = query_flags & AT_STATX_FORCE_SYNC;
921 bool do_update = false;
922 bool readdirplus_enabled = nfs_getattr_readdirplus_enable(inode);
923
924 trace_nfs_getattr_enter(inode);
925
926 request_mask &= STATX_TYPE | STATX_MODE | STATX_NLINK | STATX_UID |
927 STATX_GID | STATX_ATIME | STATX_MTIME | STATX_CTIME |
928 STATX_INO | STATX_SIZE | STATX_BLOCKS |
929 STATX_CHANGE_COOKIE;
930
931 if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) {
932 if (readdirplus_enabled)
933 nfs_readdirplus_parent_cache_hit(path->dentry);
934 goto out_no_revalidate;
935 }
936
937 /* Flush out writes to the server in order to update c/mtime/version. */
938 if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_CHANGE_COOKIE)) &&
939 S_ISREG(inode->i_mode)) {
940 if (nfs_have_delegated_mtime(inode))
941 filemap_fdatawrite(inode->i_mapping);
942 else
943 filemap_write_and_wait(inode->i_mapping);
944 }
945
946 /*
947 * We may force a getattr if the user cares about atime.
948 *
949 * Note that we only have to check the vfsmount flags here:
950 * - NFS always sets S_NOATIME, so checking it would give a
951 * bogus result
952 * - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
953 * no point in checking those.
954 */
955 if ((path->mnt->mnt_flags & MNT_NOATIME) ||
956 ((path->mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
957 request_mask &= ~STATX_ATIME;
958
959 /* Is the user requesting attributes that might need revalidation? */
960 if (!(request_mask & (STATX_MODE|STATX_NLINK|STATX_ATIME|STATX_CTIME|
961 STATX_MTIME|STATX_UID|STATX_GID|
962 STATX_SIZE|STATX_BLOCKS|
963 STATX_CHANGE_COOKIE)))
964 goto out_no_revalidate;
965
966 /* Check whether the cached attributes are stale */
967 do_update |= force_sync || nfs_attribute_cache_expired(inode);
968 cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
969 do_update |= cache_validity & NFS_INO_INVALID_CHANGE;
970 if (request_mask & STATX_ATIME)
971 do_update |= cache_validity & NFS_INO_INVALID_ATIME;
972 if (request_mask & STATX_CTIME)
973 do_update |= cache_validity & NFS_INO_INVALID_CTIME;
974 if (request_mask & STATX_MTIME)
975 do_update |= cache_validity & NFS_INO_INVALID_MTIME;
976 if (request_mask & STATX_SIZE)
977 do_update |= cache_validity & NFS_INO_INVALID_SIZE;
978 if (request_mask & STATX_NLINK)
979 do_update |= cache_validity & NFS_INO_INVALID_NLINK;
980 if (request_mask & STATX_MODE)
981 do_update |= cache_validity & NFS_INO_INVALID_MODE;
982 if (request_mask & (STATX_UID | STATX_GID))
983 do_update |= cache_validity & NFS_INO_INVALID_OTHER;
984 if (request_mask & STATX_BLOCKS)
985 do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
986
987 if (do_update) {
988 if (readdirplus_enabled)
989 nfs_readdirplus_parent_cache_miss(path->dentry);
990 err = __nfs_revalidate_inode(server, inode);
991 if (err)
992 goto out;
993 } else if (readdirplus_enabled)
994 nfs_readdirplus_parent_cache_hit(path->dentry);
995 out_no_revalidate:
996 /* Only return attributes that were revalidated. */
997 stat->result_mask = nfs_get_valid_attrmask(inode) | request_mask;
998
999 generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1000 stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
1001 stat->change_cookie = inode_peek_iversion_raw(inode);
1002 stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
1003 if (server->change_attr_type != NFS4_CHANGE_TYPE_IS_UNDEFINED)
1004 stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
1005 if (S_ISDIR(inode->i_mode))
1006 stat->blksize = NFS_SERVER(inode)->dtsize;
1007 out:
1008 trace_nfs_getattr_exit(inode, err);
1009 return err;
1010 }
1011 EXPORT_SYMBOL_GPL(nfs_getattr);
1012
1013 static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
1014 {
1015 refcount_set(&l_ctx->count, 1);
1016 l_ctx->lockowner = current->files;
1017 INIT_LIST_HEAD(&l_ctx->list);
1018 atomic_set(&l_ctx->io_count, 0);
1019 }
1020
1021 static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
1022 {
1023 struct nfs_lock_context *pos;
1024
1025 list_for_each_entry_rcu(pos, &ctx->lock_context.list, list) {
1026 if (pos->lockowner != current->files)
1027 continue;
1028 if (refcount_inc_not_zero(&pos->count))
1029 return pos;
1030 }
1031 return NULL;
1032 }
1033
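/*
 * Look up the lock context for the current lock owner, or allocate and
 * insert a new one if none exists yet.
 */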
1034 struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
1035 {
1036 struct nfs_lock_context *res, *new = NULL;
1037 struct inode *inode = d_inode(ctx->dentry);
1038
1039 rcu_read_lock();
1040 res = __nfs_find_lock_context(ctx);
1041 rcu_read_unlock();
1042 if (res == NULL) {
1043 new = kmalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
1044 if (new == NULL)
1045 return ERR_PTR(-ENOMEM);
1046 nfs_init_lock_context(new);
1047 spin_lock(&inode->i_lock);
1048 res = __nfs_find_lock_context(ctx);
1049 if (res == NULL) {
1050 new->open_context = get_nfs_open_context(ctx);
1051 if (new->open_context) {
1052 list_add_tail_rcu(&new->list,
1053 &ctx->lock_context.list);
1054 res = new;
1055 new = NULL;
1056 } else
1057 res = ERR_PTR(-EBADF);
1058 }
1059 spin_unlock(&inode->i_lock);
1060 kfree(new);
1061 }
1062 return res;
1063 }
1064 EXPORT_SYMBOL_GPL(nfs_get_lock_context);
1065
1066 void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
1067 {
1068 struct nfs_open_context *ctx = l_ctx->open_context;
1069 struct inode *inode = d_inode(ctx->dentry);
1070
1071 if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
1072 return;
1073 list_del_rcu(&l_ctx->list);
1074 spin_unlock(&inode->i_lock);
1075 put_nfs_open_context(ctx);
1076 kfree_rcu(l_ctx, rcu_head);
1077 }
1078 EXPORT_SYMBOL_GPL(nfs_put_lock_context);
1079
1080 /**
1081 * nfs_close_context - Common close_context() routine NFSv2/v3
1082 * @ctx: pointer to context
1083 * @is_sync: is this a synchronous close
1084 *
1085 * Ensure that the attributes are up to date if we're mounted
1086 * with close-to-open semantics and we have cached data that will
1087 * need to be revalidated on open.
1088 */
1089 void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
1090 {
1091 struct nfs_inode *nfsi;
1092 struct inode *inode;
1093
1094 if (!(ctx->mode & FMODE_WRITE))
1095 return;
1096 if (!is_sync)
1097 return;
1098 inode = d_inode(ctx->dentry);
1099 if (nfs_have_read_or_write_delegation(inode))
1100 return;
1101 nfsi = NFS_I(inode);
1102 if (inode->i_mapping->nrpages == 0)
1103 return;
1104 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1105 return;
1106 if (!list_empty(&nfsi->open_files))
1107 return;
1108 if (NFS_SERVER(inode)->flags & NFS_MOUNT_NOCTO)
1109 return;
1110 nfs_revalidate_inode(inode,
1111 NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE);
1112 }
1113 EXPORT_SYMBOL_GPL(nfs_close_context);
1114
1115 struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry,
1116 fmode_t f_mode,
1117 struct file *filp)
1118 {
1119 struct nfs_open_context *ctx;
1120
1121 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
1122 if (!ctx)
1123 return ERR_PTR(-ENOMEM);
1124 nfs_sb_active(dentry->d_sb);
1125 ctx->dentry = dget(dentry);
1126 if (filp)
1127 ctx->cred = get_cred(filp->f_cred);
1128 else
1129 ctx->cred = get_current_cred();
1130 rcu_assign_pointer(ctx->ll_cred, NULL);
1131 ctx->state = NULL;
1132 ctx->mode = f_mode;
1133 ctx->flags = 0;
1134 ctx->error = 0;
1135 ctx->flock_owner = (fl_owner_t)filp;
1136 nfs_init_lock_context(&ctx->lock_context);
1137 ctx->lock_context.open_context = ctx;
1138 INIT_LIST_HEAD(&ctx->list);
1139 ctx->mdsthreshold = NULL;
1140 return ctx;
1141 }
1142 EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
1143
1144 struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
1145 {
1146 if (ctx != NULL && refcount_inc_not_zero(&ctx->lock_context.count))
1147 return ctx;
1148 return NULL;
1149 }
1150 EXPORT_SYMBOL_GPL(get_nfs_open_context);
1151
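/*
 * Release a reference to an open context. Dropping the final reference
 * unhashes the context, calls the protocol's close_context(), and frees
 * the structure via RCU.
 */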
1152 static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
1153 {
1154 struct inode *inode = d_inode(ctx->dentry);
1155 struct super_block *sb = ctx->dentry->d_sb;
1156
1157 if (!refcount_dec_and_test(&ctx->lock_context.count))
1158 return;
1159 if (!list_empty(&ctx->list)) {
1160 spin_lock(&inode->i_lock);
1161 list_del_rcu(&ctx->list);
1162 spin_unlock(&inode->i_lock);
1163 }
1164 if (inode != NULL)
1165 NFS_PROTO(inode)->close_context(ctx, is_sync);
1166 put_cred(ctx->cred);
1167 dput(ctx->dentry);
1168 nfs_sb_deactive(sb);
1169 put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1));
1170 kfree(ctx->mdsthreshold);
1171 kfree_rcu(ctx, rcu_head);
1172 }
1173
1174 void put_nfs_open_context(struct nfs_open_context *ctx)
1175 {
1176 __put_nfs_open_context(ctx, 0);
1177 }
1178 EXPORT_SYMBOL_GPL(put_nfs_open_context);
1179
1180 static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
1181 {
1182 __put_nfs_open_context(ctx, 1);
1183 }
1184
1185 /*
1186 * Ensure that mmap has a recent RPC credential for use when writing out
1187 * shared pages
1188 */
1189 void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
1190 {
1191 struct inode *inode = d_inode(ctx->dentry);
1192 struct nfs_inode *nfsi = NFS_I(inode);
1193
1194 spin_lock(&inode->i_lock);
1195 if (list_empty(&nfsi->open_files) &&
1196 nfs_ooo_test(nfsi))
1197 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
1198 NFS_INO_REVAL_FORCED);
1199 list_add_tail_rcu(&ctx->list, &nfsi->open_files);
1200 spin_unlock(&inode->i_lock);
1201 }
1202 EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
1203
1204 void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
1205 {
1206 filp->private_data = get_nfs_open_context(ctx);
1207 set_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
1208 if (list_empty(&ctx->list))
1209 nfs_inode_attach_open_context(ctx);
1210 }
1211 EXPORT_SYMBOL_GPL(nfs_file_set_open_context);
1212
1213 /*
1214 * Given an inode, search for an open context with the desired characteristics
1215 */
1216 struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode)
1217 {
1218 struct nfs_inode *nfsi = NFS_I(inode);
1219 struct nfs_open_context *pos, *ctx = NULL;
1220
1221 rcu_read_lock();
1222 list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
1223 if (cred != NULL && cred_fscmp(pos->cred, cred) != 0)
1224 continue;
1225 if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
1226 continue;
1227 if (!test_bit(NFS_CONTEXT_FILE_OPEN, &pos->flags))
1228 continue;
1229 ctx = get_nfs_open_context(pos);
1230 if (ctx)
1231 break;
1232 }
1233 rcu_read_unlock();
1234 return ctx;
1235 }
1236
1237 void nfs_file_clear_open_context(struct file *filp)
1238 {
1239 struct nfs_open_context *ctx = nfs_file_open_context(filp);
1240
1241 if (ctx) {
1242 struct inode *inode = d_inode(ctx->dentry);
1243
1244 clear_bit(NFS_CONTEXT_FILE_OPEN, &ctx->flags);
1245 /*
1246 * A fatal error occurred on a previous write. Try to write back
1247 * every page again.
1248 */
1249 if (ctx->error < 0)
1250 invalidate_inode_pages2(inode->i_mapping);
1251 filp->private_data = NULL;
1252 put_nfs_open_context_sync(ctx);
1253 }
1254 }
1255
1256 /*
1257 * These allocate and release file read/write context information.
1258 */
1259 int nfs_open(struct inode *inode, struct file *filp)
1260 {
1261 struct nfs_open_context *ctx;
1262
1263 ctx = alloc_nfs_open_context(file_dentry(filp),
1264 flags_to_mode(filp->f_flags), filp);
1265 if (IS_ERR(ctx))
1266 return PTR_ERR(ctx);
1267 nfs_file_set_open_context(filp, ctx);
1268 put_nfs_open_context(ctx);
1269 nfs_fscache_open_file(inode, filp);
1270 return 0;
1271 }
1272
1273 /*
1274 * This function is called whenever some part of NFS notices that
1275 * the cached attributes have to be refreshed.
1276 */
1277 int
1278 __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
1279 {
1280 int status = -ESTALE;
1281 struct nfs_fattr *fattr = NULL;
1282 struct nfs_inode *nfsi = NFS_I(inode);
1283
1284 dfprintk(PAGECACHE, "NFS: revalidating (%s/%Lu)\n",
1285 inode->i_sb->s_id, (unsigned long long)NFS_FILEID(inode));
1286
1287 trace_nfs_revalidate_inode_enter(inode);
1288
1289 if (is_bad_inode(inode))
1290 goto out;
1291 if (NFS_STALE(inode))
1292 goto out;
1293
1294 /* pNFS: Attributes aren't updated until we layoutcommit */
1295 if (S_ISREG(inode->i_mode)) {
1296 status = pnfs_sync_inode(inode, false);
1297 if (status)
1298 goto out;
1299 }
1300
1301 status = -ENOMEM;
1302 fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
1303 if (fattr == NULL)
1304 goto out;
1305
1306 nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
1307
1308 status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr, inode);
1309 if (status != 0) {
1310 dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n",
1311 inode->i_sb->s_id,
1312 (unsigned long long)NFS_FILEID(inode), status);
1313 switch (status) {
1314 case -ETIMEDOUT:
1315 /* A soft timeout occurred. Use cached information? */
1316 if (server->flags & NFS_MOUNT_SOFTREVAL)
1317 status = 0;
1318 break;
1319 case -ESTALE:
1320 if (!S_ISDIR(inode->i_mode))
1321 nfs_set_inode_stale(inode);
1322 else
1323 nfs_zap_caches(inode);
1324 }
1325 goto out;
1326 }
1327
1328 status = nfs_refresh_inode(inode, fattr);
1329 if (status) {
1330 dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) refresh failed, error=%d\n",
1331 inode->i_sb->s_id,
1332 (unsigned long long)NFS_FILEID(inode), status);
1333 goto out;
1334 }
1335
1336 if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
1337 nfs_zap_acl_cache(inode);
1338
1339 nfs_setsecurity(inode, fattr);
1340
1341 dfprintk(PAGECACHE, "NFS: (%s/%Lu) revalidation complete\n",
1342 inode->i_sb->s_id,
1343 (unsigned long long)NFS_FILEID(inode));
1344
1345 out:
1346 nfs_free_fattr(fattr);
1347 trace_nfs_revalidate_inode_exit(inode, status);
1348 return status;
1349 }
1350
1351 int nfs_attribute_cache_expired(struct inode *inode)
1352 {
1353 if (nfs_have_delegated_attributes(inode))
1354 return 0;
1355 return nfs_attribute_timeout(inode);
1356 }
1357
1358 /**
1359 * nfs_revalidate_inode - Revalidate the inode attributes
1360 * @inode: pointer to inode struct
1361 * @flags: cache flags to check
1362 *
1363 * Updates inode attribute information by retrieving the data from the server.
1364 */
1365 int nfs_revalidate_inode(struct inode *inode, unsigned long flags)
1366 {
1367 if (!nfs_check_cache_invalid(inode, flags))
1368 return NFS_STALE(inode) ? -ESTALE : 0;
1369 return __nfs_revalidate_inode(NFS_SERVER(inode), inode);
1370 }
1371 EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
1372
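/*
 * Flush outstanding writes for regular files, then throw away all cached
 * pages belonging to the mapping.
 */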
1373 static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
1374 {
1375 int ret;
1376
1377 nfs_fscache_invalidate(inode, 0);
1378 if (mapping->nrpages != 0) {
1379 if (S_ISREG(inode->i_mode)) {
1380 ret = nfs_sync_mapping(mapping);
1381 if (ret < 0)
1382 return ret;
1383 }
1384 ret = invalidate_inode_pages2(mapping);
1385 if (ret < 0)
1386 return ret;
1387 }
1388 nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
1389
1390 dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n",
1391 inode->i_sb->s_id,
1392 (unsigned long long)NFS_FILEID(inode));
1393 return 0;
1394 }
1395
1396 /**
1397 * nfs_clear_invalid_mapping - Conditionally clear a mapping
1398 * @mapping: pointer to mapping
1399 *
1400 * If the NFS_INO_INVALID_DATA inode flag is set, clear the mapping.
1401 */
1402 int nfs_clear_invalid_mapping(struct address_space *mapping)
1403 {
1404 struct inode *inode = mapping->host;
1405 struct nfs_inode *nfsi = NFS_I(inode);
1406 unsigned long *bitlock = &nfsi->flags;
1407 int ret = 0;
1408
1409 /*
1410 * We must clear NFS_INO_INVALID_DATA first to ensure that
1411 * invalidations that come in while we're shooting down the mappings
1412 * are respected. But, that leaves a race window where one revalidator
1413 * can clear the flag, and then another checks it before the mapping
1414 * gets invalidated. Fix that by serializing access to this part of
1415 * the function.
1416 *
1417 * At the same time, we need to allow other tasks to see whether we
1418 * might be in the middle of invalidating the pages, so we only set
1419 * the bit lock here if it looks like we're going to be doing that.
1420 */
1421 for (;;) {
1422 ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
1423 nfs_wait_bit_killable,
1424 TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
1425 if (ret)
1426 goto out;
1427 smp_rmb(); /* pairs with smp_wmb() below */
1428 if (test_bit(NFS_INO_INVALIDATING, bitlock))
1429 continue;
1430 /* pairs with nfs_set_cache_invalid()'s smp_store_release() */
1431 if (!(smp_load_acquire(&nfsi->cache_validity) & NFS_INO_INVALID_DATA))
1432 goto out;
1433 /* Slow-path that double-checks with spinlock held */
1434 spin_lock(&inode->i_lock);
1435 if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
1436 spin_unlock(&inode->i_lock);
1437 continue;
1438 }
1439 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1440 break;
1441 spin_unlock(&inode->i_lock);
1442 goto out;
1443 }
1444
1445 set_bit(NFS_INO_INVALIDATING, bitlock);
1446 smp_wmb();
1447 nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
1448 nfs_ooo_clear(nfsi);
1449 spin_unlock(&inode->i_lock);
1450 trace_nfs_invalidate_mapping_enter(inode);
1451 ret = nfs_invalidate_mapping(inode, mapping);
1452 trace_nfs_invalidate_mapping_exit(inode, ret);
1453
1454 clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
1455 smp_mb__after_atomic();
1456 wake_up_bit(bitlock, NFS_INO_INVALIDATING);
1457 out:
1458 return ret;
1459 }
1460
1461 bool nfs_mapping_need_revalidate_inode(struct inode *inode)
1462 {
1463 return nfs_check_cache_invalid(inode, NFS_INO_INVALID_CHANGE) ||
1464 NFS_STALE(inode);
1465 }
1466
1467 int nfs_revalidate_mapping_rcu(struct inode *inode)
1468 {
1469 struct nfs_inode *nfsi = NFS_I(inode);
1470 unsigned long *bitlock = &nfsi->flags;
1471 int ret = 0;
1472
1473 if (IS_SWAPFILE(inode))
1474 goto out;
1475 if (nfs_mapping_need_revalidate_inode(inode)) {
1476 ret = -ECHILD;
1477 goto out;
1478 }
1479 spin_lock(&inode->i_lock);
1480 if (test_bit(NFS_INO_INVALIDATING, bitlock) ||
1481 (nfsi->cache_validity & NFS_INO_INVALID_DATA))
1482 ret = -ECHILD;
1483 spin_unlock(&inode->i_lock);
1484 out:
1485 return ret;
1486 }
1487
1488 /**
1489 * nfs_revalidate_mapping - Revalidate the pagecache
1490 * @inode: pointer to host inode
1491 * @mapping: pointer to mapping
1492 */
1493 int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
1494 {
1495 /* swapfiles are not supposed to be shared. */
1496 if (IS_SWAPFILE(inode))
1497 return 0;
1498
1499 if (nfs_mapping_need_revalidate_inode(inode)) {
1500 int ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
1501 if (ret < 0)
1502 return ret;
1503 }
1504
1505 return nfs_clear_invalid_mapping(mapping);
1506 }
1507
1508 static bool nfs_file_has_writers(struct nfs_inode *nfsi)
1509 {
1510 struct inode *inode = &nfsi->vfs_inode;
1511
1512 if (!S_ISREG(inode->i_mode))
1513 return false;
1514 if (list_empty(&nfsi->open_files))
1515 return false;
1516 return inode_is_open_for_write(inode);
1517 }
1518
1519 static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
1520 {
1521 return nfs_file_has_writers(nfsi) && nfs_file_io_is_buffered(nfsi);
1522 }
1523
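/*
 * Apply weak cache consistency (WCC) data: when the pre-operation
 * attributes match the cached values, the post-operation values can be
 * applied without forcing a full revalidation.
 */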
1524 static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1525 {
1526 struct timespec64 ts;
1527
1528 if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
1529 && (fattr->valid & NFS_ATTR_FATTR_CHANGE)
1530 && inode_eq_iversion_raw(inode, fattr->pre_change_attr)) {
1531 inode_set_iversion_raw(inode, fattr->change_attr);
1532 if (S_ISDIR(inode->i_mode))
1533 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1534 else if (nfs_server_capable(inode, NFS_CAP_XATTR))
1535 nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR);
1536 }
1537 /* If we have atomic WCC data, we may update some attributes */
1538 ts = inode_get_ctime(inode);
1539 if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
1540 && (fattr->valid & NFS_ATTR_FATTR_CTIME)
1541 && timespec64_equal(&ts, &fattr->pre_ctime)) {
1542 inode_set_ctime_to_ts(inode, fattr->ctime);
1543 }
1544
1545 ts = inode_get_mtime(inode);
1546 if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
1547 && (fattr->valid & NFS_ATTR_FATTR_MTIME)
1548 && timespec64_equal(&ts, &fattr->pre_mtime)) {
1549 inode_set_mtime_to_ts(inode, fattr->mtime);
1550 }
1551 if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
1552 && (fattr->valid & NFS_ATTR_FATTR_SIZE)
1553 && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
1554 && !nfs_have_writebacks(inode)) {
1555 trace_nfs_size_wcc(inode, fattr->size);
1556 i_size_write(inode, nfs_size_to_loff_t(fattr->size));
1557 }
1558 }
1559
1560 /**
1561 * nfs_check_inode_attributes - verify consistency of the inode attribute cache
1562 * @inode: pointer to inode
1563 * @fattr: updated attributes
1564 *
1565 * Verifies the attribute cache. If we have just changed the attributes,
1566 * so that fattr carries weak cache consistency data, then it may
1567 * also update the ctime/mtime/change_attribute.
1568 */
1569 static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
1570 {
1571 struct nfs_inode *nfsi = NFS_I(inode);
1572 loff_t cur_size, new_isize;
1573 unsigned long invalid = 0;
1574 struct timespec64 ts;
1575
1576 if (nfs_have_delegated_attributes(inode))
1577 return 0;
1578
1579 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
1580 /* Only a mounted-on-fileid? Just exit */
1581 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
1582 return 0;
1583 /* Has the inode gone and changed behind our back? */
1584 } else if (nfsi->fileid != fattr->fileid) {
1585 /* Is this perhaps the mounted-on fileid? */
1586 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
1587 nfsi->fileid == fattr->mounted_on_fileid)
1588 return 0;
1589 return -ESTALE;
1590 }
1591 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && inode_wrong_type(inode, fattr->mode))
1592 return -ESTALE;
1593
1594
1595 if (!nfs_file_has_buffered_writers(nfsi)) {
1596 /* Verify a few of the more important attributes */
1597 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
1598 invalid |= NFS_INO_INVALID_CHANGE;
1599
1600 ts = inode_get_mtime(inode);
1601 if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec64_equal(&ts, &fattr->mtime))
1602 invalid |= NFS_INO_INVALID_MTIME;
1603
1604 ts = inode_get_ctime(inode);
1605 if ((fattr->valid & NFS_ATTR_FATTR_CTIME) && !timespec64_equal(&ts, &fattr->ctime))
1606 invalid |= NFS_INO_INVALID_CTIME;
1607
1608 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
1609 cur_size = i_size_read(inode);
1610 new_isize = nfs_size_to_loff_t(fattr->size);
1611 if (cur_size != new_isize)
1612 invalid |= NFS_INO_INVALID_SIZE;
1613 }
1614 }
1615
1616 /* Have any file permissions changed? */
1617 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
1618 invalid |= NFS_INO_INVALID_MODE;
1619 if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
1620 invalid |= NFS_INO_INVALID_OTHER;
1621 if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
1622 invalid |= NFS_INO_INVALID_OTHER;
1623
1624 /* Has the link count changed? */
1625 if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
1626 invalid |= NFS_INO_INVALID_NLINK;
1627
1628 ts = inode_get_atime(inode);
1629 if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec64_equal(&ts, &fattr->atime))
1630 invalid |= NFS_INO_INVALID_ATIME;
1631
1632 if (invalid != 0)
1633 nfs_set_cache_invalid(inode, invalid);
1634
1635 nfsi->read_cache_jiffies = fattr->time_start;
1636 return 0;
1637 }
1638
1639 static atomic_long_t nfs_attr_generation_counter;
1640
1641 static unsigned long nfs_read_attr_generation_counter(void)
1642 {
1643 return atomic_long_read(&nfs_attr_generation_counter);
1644 }
1645
1646 unsigned long nfs_inc_attr_generation_counter(void)
1647 {
1648 return atomic_long_inc_return(&nfs_attr_generation_counter);
1649 }
1650 EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
1651
1652 void nfs_fattr_init(struct nfs_fattr *fattr)
1653 {
1654 fattr->valid = 0;
1655 fattr->time_start = jiffies;
1656 fattr->gencount = nfs_inc_attr_generation_counter();
1657 fattr->owner_name = NULL;
1658 fattr->group_name = NULL;
1659 fattr->mdsthreshold = NULL;
1660 }
1661 EXPORT_SYMBOL_GPL(nfs_fattr_init);
1662
1663 /**
1664 * nfs_fattr_set_barrier
1665 * @fattr: attributes
1666 *
1667 * Used to set a barrier after an attribute was updated. This
1668 * barrier ensures that older attributes from RPC calls that may
1669 * have raced with our update cannot clobber these new values.
1670 * Note that you are still responsible for ensuring that other
1671 * operations which change the attribute on the server do not
1672 * collide.
1673 */
1674 void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
1675 {
1676 fattr->gencount = nfs_inc_attr_generation_counter();
1677 }
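/*
 * Illustrative sketch (not compiled): callers normally pair the barrier
 * with a refresh under the inode spinlock, as nfs_post_op_update_inode()
 * below does, so that older in-flight replies cannot overwrite the
 * values being installed:
 *
 *	spin_lock(&inode->i_lock);
 *	nfs_fattr_set_barrier(fattr);
 *	nfs_refresh_inode_locked(inode, fattr);
 *	spin_unlock(&inode->i_lock);
 */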
1678
1679 struct nfs_fattr *nfs_alloc_fattr(void)
1680 {
1681 struct nfs_fattr *fattr;
1682
1683 fattr = kmalloc(sizeof(*fattr), GFP_KERNEL);
1684 if (fattr != NULL) {
1685 nfs_fattr_init(fattr);
1686 fattr->label = NULL;
1687 }
1688 return fattr;
1689 }
1690 EXPORT_SYMBOL_GPL(nfs_alloc_fattr);
1691
1692 struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server)
1693 {
1694 struct nfs_fattr *fattr = nfs_alloc_fattr();
1695
1696 if (!fattr)
1697 return NULL;
1698
1699 fattr->label = nfs4_label_alloc(server, GFP_KERNEL);
1700 if (IS_ERR(fattr->label)) {
1701 kfree(fattr);
1702 return NULL;
1703 }
1704
1705 return fattr;
1706 }
1707 EXPORT_SYMBOL_GPL(nfs_alloc_fattr_with_label);
1708
1709 struct nfs_fh *nfs_alloc_fhandle(void)
1710 {
1711 struct nfs_fh *fh;
1712
1713 fh = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
1714 if (fh != NULL)
1715 fh->size = 0;
1716 return fh;
1717 }
1718 EXPORT_SYMBOL_GPL(nfs_alloc_fhandle);
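/*
 * Illustrative sketch (not compiled): the usual calling pattern is to
 * allocate a scratch fattr/fhandle pair, bail out if either allocation
 * failed, and free both once the reply has been decoded (kfree(NULL)
 * is a no-op, so the error path stays simple):
 *
 *	struct nfs_fattr *fattr = nfs_alloc_fattr();
 *	struct nfs_fh *fh = nfs_alloc_fhandle();
 *
 *	if (fattr == NULL || fh == NULL) {
 *		kfree(fattr);
 *		kfree(fh);
 *		return -ENOMEM;
 *	}
 *	... decode the reply into fh and fattr ...
 *	kfree(fattr);
 *	kfree(fh);
 */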
1719
1720 #ifdef NFS_DEBUG
1721 /*
1722 * _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle
1723 * in the same way that wireshark does
1724 *
1725 * @fh: file handle
1726 *
1727 * For debugging only.
1728 */
1729 u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh)
1730 {
1731 /* wireshark uses 32-bit AUTODIN crc and does a bitwise
1732 * not on the result */
1733 return nfs_fhandle_hash(fh);
1734 }
1735 EXPORT_SYMBOL_GPL(_nfs_display_fhandle_hash);
1736
1737 /*
1738 * _nfs_display_fhandle - display an NFS file handle on the console
1739 *
1740 * @fh: file handle to display
1741 * @caption: display caption
1742 *
1743 * For debugging only.
1744 */
1745 void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption)
1746 {
1747 unsigned short i;
1748
1749 if (fh == NULL || fh->size == 0) {
1750 printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh);
1751 return;
1752 }
1753
1754 printk(KERN_DEFAULT "%s at %p is %u bytes, crc: 0x%08x:\n",
1755 caption, fh, fh->size, _nfs_display_fhandle_hash(fh));
1756 for (i = 0; i < fh->size; i += 16) {
1757 __be32 *pos = (__be32 *)&fh->data[i];
1758
1759 switch ((fh->size - i - 1) >> 2) {
1760 case 0:
1761 printk(KERN_DEFAULT " %08x\n",
1762 be32_to_cpup(pos));
1763 break;
1764 case 1:
1765 printk(KERN_DEFAULT " %08x %08x\n",
1766 be32_to_cpup(pos), be32_to_cpup(pos + 1));
1767 break;
1768 case 2:
1769 printk(KERN_DEFAULT " %08x %08x %08x\n",
1770 be32_to_cpup(pos), be32_to_cpup(pos + 1),
1771 be32_to_cpup(pos + 2));
1772 break;
1773 default:
1774 printk(KERN_DEFAULT " %08x %08x %08x %08x\n",
1775 be32_to_cpup(pos), be32_to_cpup(pos + 1),
1776 be32_to_cpup(pos + 2), be32_to_cpup(pos + 3));
1777 }
1778 }
1779 }
1780 EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
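/*
 * Illustrative sketch (not compiled): a debug-only call site might dump
 * the handle attached to an inode like so (the caption string is just an
 * example):
 *
 *	_nfs_display_fhandle(NFS_FH(inode), "lookup reply fh");
 */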
1781 #endif
1782
1783 /**
1784 * nfs_inode_attrs_cmp_generic - compare attributes
1785 * @fattr: attributes
1786 * @inode: pointer to inode
1787 *
1788 * Attempt to divine whether or not an RPC call reply carrying stale
1789 * attributes got scheduled after another call carrying updated ones.
1790 * Note also the check for wraparound of 'attr_gencount'
1791 *
1792 * The function returns '1' if it thinks the attributes in @fattr are
1793 * more recent than the ones cached in @inode. Otherwise it returns
1794 * the value '0'.
1795 */
1796 static int nfs_inode_attrs_cmp_generic(const struct nfs_fattr *fattr,
1797 const struct inode *inode)
1798 {
1799 unsigned long attr_gencount = NFS_I(inode)->attr_gencount;
1800
1801 return (long)(fattr->gencount - attr_gencount) > 0 ||
1802 (long)(attr_gencount - nfs_read_attr_generation_counter()) > 0;
1803 }
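/*
 * Worked example (illustrative): the signed subtraction keeps the test
 * safe across counter wraparound. With 64-bit longs, a gencount of 2
 * sampled just after the counter wrapped still compares as newer than a
 * cached attr_gencount of ULONG_MAX, because (long)(2 - ULONG_MAX) == 3,
 * which is > 0. The second clause treats a cached attr_gencount that has
 * somehow run ahead of the global counter as suspect, so the incoming
 * attributes win.
 */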
1804
1805 /**
1806 * nfs_inode_attrs_cmp_monotonic - compare attributes
1807 * @fattr: attributes
1808 * @inode: pointer to inode
1809 *
1810 * Attempt to divine whether or not an RPC call reply carrying stale
1811 * attributes got scheduled after another call carrying updated ones.
1812 *
1813 * We assume that the server observes monotonic semantics for
1814 * the change attribute, so a larger value means that the attributes in
1815 * @fattr are more recent, in which case the function returns the
1816 * value '1'.
1817 * A return value of '0' indicates no measurable change.
1818 * A return value of '-1' means that the attributes in @inode are
1819 * more recent.
1820 */
1821 static int nfs_inode_attrs_cmp_monotonic(const struct nfs_fattr *fattr,
1822 const struct inode *inode)
1823 {
1824 s64 diff = fattr->change_attr - inode_peek_iversion_raw(inode);
1825 if (diff > 0)
1826 return 1;
1827 return diff == 0 ? 0 : -1;
1828 }
1829
1830 /**
1831 * nfs_inode_attrs_cmp_strict_monotonic - compare attributes
1832 * @fattr: attributes
1833 * @inode: pointer to inode
1834 *
1835 * Attempt to divine whether or not an RPC call reply carrying stale
1836 * attributes got scheduled after another call carrying updated ones.
1837 *
1838 * We assume that the server observes strictly monotonic semantics for
1839 * the change attribute, so a larger value means that the attributes in
1840 * @fattr are more recent, in which case the function returns the
1841 * value '1'.
1842 * A return value of '-1' means that the attributes in @inode are
1843 * more recent or unchanged.
1844 */
1845 static int nfs_inode_attrs_cmp_strict_monotonic(const struct nfs_fattr *fattr,
1846 const struct inode *inode)
1847 {
1848 return nfs_inode_attrs_cmp_monotonic(fattr, inode) > 0 ? 1 : -1;
1849 }
1850
1851 /**
1852 * nfs_inode_attrs_cmp - compare attributes
1853 * @fattr: attributes
1854 * @inode: pointer to inode
1855 *
1856 * This function returns '1' if it thinks the attributes in @fattr are
1857 * more recent than the ones cached in @inode. It returns '-1' if
1858 * the attributes in @inode are more recent than the ones in @fattr,
1859 * and it returns 0 if not sure.
1860 */
1861 static int nfs_inode_attrs_cmp(const struct nfs_fattr *fattr,
1862 const struct inode *inode)
1863 {
1864 if (nfs_inode_attrs_cmp_generic(fattr, inode) > 0)
1865 return 1;
1866 switch (NFS_SERVER(inode)->change_attr_type) {
1867 case NFS4_CHANGE_TYPE_IS_UNDEFINED:
1868 break;
1869 case NFS4_CHANGE_TYPE_IS_TIME_METADATA:
1870 if (!(fattr->valid & NFS_ATTR_FATTR_CHANGE))
1871 break;
1872 return nfs_inode_attrs_cmp_monotonic(fattr, inode);
1873 default:
1874 if (!(fattr->valid & NFS_ATTR_FATTR_CHANGE))
1875 break;
1876 return nfs_inode_attrs_cmp_strict_monotonic(fattr, inode);
1877 }
1878 return 0;
1879 }
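/*
 * Illustrative summary of the dispatch above: with a strictly monotonic
 * change attribute, a reply whose change_attr merely equals the cached
 * iversion is treated as older (-1) and cannot clobber newer cached
 * state, whereas with NFS4_CHANGE_TYPE_IS_TIME_METADATA an equal value
 * only means "no measurable change" (0), so nfs_check_inode_attributes()
 * still gets a chance to compare the remaining attributes.
 */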
1880
1881 /**
1882 * nfs_inode_finish_partial_attr_update - complete a previous inode update
1883 * @fattr: attributes
1884 * @inode: pointer to inode
1885 *
1886 * Returns '1' if the last attribute update left the inode cached
1887 * attributes in a partially unrevalidated state, and @fattr
1888 * matches the change attribute of that partial update.
1889 * Otherwise returns '0'.
1890 */
1891 static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
1892 const struct inode *inode)
1893 {
1894 const unsigned long check_valid =
1895 NFS_INO_INVALID_ATIME | NFS_INO_INVALID_CTIME |
1896 NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
1897 NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_OTHER |
1898 NFS_INO_INVALID_NLINK;
1899 unsigned long cache_validity = NFS_I(inode)->cache_validity;
1900 enum nfs4_change_attr_type ctype = NFS_SERVER(inode)->change_attr_type;
1901
1902 if (ctype != NFS4_CHANGE_TYPE_IS_UNDEFINED &&
1903 !(cache_validity & NFS_INO_INVALID_CHANGE) &&
1904 (cache_validity & check_valid) != 0 &&
1905 (fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
1906 nfs_inode_attrs_cmp_monotonic(fattr, inode) == 0)
1907 return 1;
1908 return 0;
1909 }
1910
1911 static void nfs_ooo_merge(struct nfs_inode *nfsi,
1912 u64 start, u64 end)
1913 {
1914 int i, cnt;
1915
1916 if (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)
1917 /* No point merging anything */
1918 return;
1919
1920 if (!nfsi->ooo) {
1921 nfsi->ooo = kmalloc(sizeof(*nfsi->ooo), GFP_ATOMIC);
1922 if (!nfsi->ooo) {
1923 nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
1924 return;
1925 }
1926 nfsi->ooo->cnt = 0;
1927 }
1928
1929 /* add this range, merging if possible */
1930 cnt = nfsi->ooo->cnt;
1931 for (i = 0; i < cnt; i++) {
1932 if (end == nfsi->ooo->gap[i].start)
1933 end = nfsi->ooo->gap[i].end;
1934 else if (start == nfsi->ooo->gap[i].end)
1935 start = nfsi->ooo->gap[i].start;
1936 else
1937 continue;
1938 /* Remove 'i' from table and loop to insert the new range */
1939 cnt -= 1;
1940 nfsi->ooo->gap[i] = nfsi->ooo->gap[cnt];
1941 i = -1;
1942 }
1943 if (start != end) {
1944 if (cnt >= ARRAY_SIZE(nfsi->ooo->gap)) {
1945 nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
1946 kfree(nfsi->ooo);
1947 nfsi->ooo = NULL;
1948 return;
1949 }
1950 nfsi->ooo->gap[cnt].start = start;
1951 nfsi->ooo->gap[cnt].end = end;
1952 cnt += 1;
1953 }
1954 nfsi->ooo->cnt = cnt;
1955 }
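/*
 * Worked example (illustrative): if local writes were in flight while the
 * server's change attribute jumped from 5 straight to 8, nfs_update_inode()
 * below records the gap (5, 8). An out-of-order reply carrying pre=5/post=6
 * then merges into (6, 8), a later pre=6/post=7 reply shrinks that to
 * (7, 8), and a final pre=7/post=8 reply makes start == end, so the gap is
 * dropped and the table empties again.
 */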
1956
1957 static void nfs_ooo_record(struct nfs_inode *nfsi,
1958 struct nfs_fattr *fattr)
1959 {
1960 /* This reply was out-of-order, so record in the
1961 * pre/post change id, possibly cancelling
1962 * gaps created when iversion was jumped forward.
1963 */
1964 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) &&
1965 (fattr->valid & NFS_ATTR_FATTR_PRECHANGE))
1966 nfs_ooo_merge(nfsi,
1967 fattr->change_attr,
1968 fattr->pre_change_attr);
1969 }
1970
1971 static int nfs_refresh_inode_locked(struct inode *inode,
1972 struct nfs_fattr *fattr)
1973 {
1974 int attr_cmp = nfs_inode_attrs_cmp(fattr, inode);
1975 int ret = 0;
1976
1977 trace_nfs_refresh_inode_enter(inode);
1978
1979 if (attr_cmp > 0 || nfs_inode_finish_partial_attr_update(fattr, inode))
1980 ret = nfs_update_inode(inode, fattr);
1981 else {
1982 nfs_ooo_record(NFS_I(inode), fattr);
1983
1984 if (attr_cmp == 0)
1985 ret = nfs_check_inode_attributes(inode, fattr);
1986 }
1987
1988 trace_nfs_refresh_inode_exit(inode, ret);
1989 return ret;
1990 }
1991
1992 /**
1993 * nfs_refresh_inode - try to update the inode attribute cache
1994 * @inode: pointer to inode
1995 * @fattr: updated attributes
1996 *
1997 * Check that an RPC call that returned attributes has not overlapped with
1998 * other recent updates of the inode metadata, then decide whether it is
1999 * safe to do a full update of the inode attributes, or whether just to
2000 * call nfs_check_inode_attributes.
2001 */
2002 int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
2003 {
2004 int status;
2005
2006 if ((fattr->valid & NFS_ATTR_FATTR) == 0)
2007 return 0;
2008 spin_lock(&inode->i_lock);
2009 status = nfs_refresh_inode_locked(inode, fattr);
2010 spin_unlock(&inode->i_lock);
2011
2012 return status;
2013 }
2014 EXPORT_SYMBOL_GPL(nfs_refresh_inode);
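/*
 * Illustrative sketch (not compiled): a typical caller is an RPC
 * completion handler that received post-op attributes and simply feeds
 * them back into the cache ("res" is a hypothetical reply structure):
 *
 *	if (task->tk_status == 0)
 *		nfs_refresh_inode(inode, res->fattr);
 */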
2015
2016 static int nfs_post_op_update_inode_locked(struct inode *inode,
2017 struct nfs_fattr *fattr, unsigned int invalid)
2018 {
2019 if (S_ISDIR(inode->i_mode))
2020 invalid |= NFS_INO_INVALID_DATA;
2021 nfs_set_cache_invalid(inode, invalid);
2022 if ((fattr->valid & NFS_ATTR_FATTR) == 0)
2023 return 0;
2024 return nfs_refresh_inode_locked(inode, fattr);
2025 }
2026
2027 /**
2028 * nfs_post_op_update_inode - try to update the inode attribute cache
2029 * @inode: pointer to inode
2030 * @fattr: updated attributes
2031 *
2032 * After an operation that has changed the inode metadata, mark the
2033 * attribute cache as being invalid, then try to update it.
2034 *
2035 * NB: if the server didn't return any post op attributes, this
2036 * function will force the retrieval of attributes before the next
2037 * NFS request. Thus it should be used only for operations that
2038 * are expected to change one or more attributes, to avoid
2039 * unnecessary NFS requests and trips through nfs_update_inode().
2040 */
2041 int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2042 {
2043 int status;
2044
2045 spin_lock(&inode->i_lock);
2046 nfs_fattr_set_barrier(fattr);
2047 status = nfs_post_op_update_inode_locked(inode, fattr,
2048 NFS_INO_INVALID_CHANGE
2049 | NFS_INO_INVALID_CTIME
2050 | NFS_INO_REVAL_FORCED);
2051 spin_unlock(&inode->i_lock);
2052
2053 return status;
2054 }
2055 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
2056
2057 /**
2058 * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
2059 * @inode: pointer to inode
2060 * @fattr: updated attributes
2061 *
2062 * After an operation that has changed the inode metadata, mark the
2063 * attribute cache as being invalid, then try to update it. Fake up
2064 * weak cache consistency data, if none exist.
2065 *
2066 * This function is mainly designed to be used by the ->write_done() functions.
2067 */
2068 int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
2069 {
2070 int attr_cmp = nfs_inode_attrs_cmp(fattr, inode);
2071 int status;
2072
2073 /* Don't do a WCC update if these attributes are already stale */
2074 if (attr_cmp < 0)
2075 return 0;
2076 if ((fattr->valid & NFS_ATTR_FATTR) == 0 || !attr_cmp) {
2077 /* Record the pre/post change info before clearing PRECHANGE */
2078 nfs_ooo_record(NFS_I(inode), fattr);
2079 fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
2080 | NFS_ATTR_FATTR_PRESIZE
2081 | NFS_ATTR_FATTR_PREMTIME
2082 | NFS_ATTR_FATTR_PRECTIME);
2083 goto out_noforce;
2084 }
2085 if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
2086 (fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
2087 fattr->pre_change_attr = inode_peek_iversion_raw(inode);
2088 fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
2089 }
2090 if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
2091 (fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
2092 fattr->pre_ctime = inode_get_ctime(inode);
2093 fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
2094 }
2095 if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
2096 (fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
2097 fattr->pre_mtime = inode_get_mtime(inode);
2098 fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
2099 }
2100 if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
2101 (fattr->valid & NFS_ATTR_FATTR_PRESIZE) == 0) {
2102 fattr->pre_size = i_size_read(inode);
2103 fattr->valid |= NFS_ATTR_FATTR_PRESIZE;
2104 }
2105 out_noforce:
2106 status = nfs_post_op_update_inode_locked(inode, fattr,
2107 NFS_INO_INVALID_CHANGE
2108 | NFS_INO_INVALID_CTIME
2109 | NFS_INO_INVALID_MTIME
2110 | NFS_INO_INVALID_BLOCKS);
2111 return status;
2112 }
2113
2114 /**
2115 * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
2116 * @inode: pointer to inode
2117 * @fattr: updated attributes
2118 *
2119 * After an operation that has changed the inode metadata, mark the
2120 * attribute cache as being invalid, then try to update it. Fake up
2121 * weak cache consistency data, if none exist.
2122 *
2123 * This function is mainly designed to be used by the ->write_done() functions.
2124 */
2125 int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
2126 {
2127 int status;
2128
2129 spin_lock(&inode->i_lock);
2130 nfs_fattr_set_barrier(fattr);
2131 status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
2132 spin_unlock(&inode->i_lock);
2133 return status;
2134 }
2135 EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
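/*
 * Illustrative sketch (not compiled): a ->write_done() style handler
 * would pass the attributes returned in the WRITE reply straight in and
 * let the helper above synthesise any missing pre-op values from the
 * current inode state ("hdr" is a hypothetical name):
 *
 *	if (task->tk_status >= 0)
 *		nfs_post_op_update_inode_force_wcc(hdr->inode, hdr->res.fattr);
 */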
2136
2137
2138 /*
2139 * Many nfs protocol calls return the new file attributes after
2140 * an operation. Here we update the inode to reflect the state
2141 * of the server's inode.
2142 *
2143 * This is a bit tricky because we have to make sure all dirty pages
2144 * have been sent off to the server before calling invalidate_inode_pages.
2145 * To make sure no other process adds more write requests while we try
2146 * our best to flush them, we make them sleep during the attribute refresh.
2147 *
2148 * A very similar scenario holds for the dir cache.
2149 */
2150 static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
2151 {
2152 struct nfs_server *server = NFS_SERVER(inode);
2153 struct nfs_inode *nfsi = NFS_I(inode);
2154 loff_t cur_isize, new_isize;
2155 u64 fattr_supported = server->fattr_valid;
2156 unsigned long invalid = 0;
2157 unsigned long now = jiffies;
2158 unsigned long save_cache_validity;
2159 bool have_writers = nfs_file_has_buffered_writers(nfsi);
2160 bool cache_revalidated = true;
2161 bool attr_changed = false;
2162 bool have_delegation;
2163
2164 dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
2165 __func__, inode->i_sb->s_id, inode->i_ino,
2166 nfs_display_fhandle_hash(NFS_FH(inode)),
2167 atomic_read(&inode->i_count), fattr->valid);
2168
2169 if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
2170 /* Only a mounted-on-fileid? Just exit */
2171 if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
2172 return 0;
2173 /* Has the inode gone and changed behind our back? */
2174 } else if (nfsi->fileid != fattr->fileid) {
2175 /* Is this perhaps the mounted-on fileid? */
2176 if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
2177 nfsi->fileid == fattr->mounted_on_fileid)
2178 return 0;
2179 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
2180 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
2181 NFS_SERVER(inode)->nfs_client->cl_hostname,
2182 inode->i_sb->s_id, (long long)nfsi->fileid,
2183 (long long)fattr->fileid);
2184 goto out_err;
2185 }
2186
2187 /*
2188 * Make sure the inode's type hasn't changed.
2189 */
2190 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && inode_wrong_type(inode, fattr->mode)) {
2191 /*
2192 * Big trouble! The inode has become a different object.
2193 */
2194 printk(KERN_DEBUG "NFS: %s: inode %lu mode changed, %07o to %07o\n",
2195 __func__, inode->i_ino, inode->i_mode, fattr->mode);
2196 goto out_err;
2197 }
2198
2199 /* Update the fsid? */
2200 if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
2201 !nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
2202 !IS_AUTOMOUNT(inode))
2203 server->fsid = fattr->fsid;
2204
2205 /* Save the delegation state before clearing cache_validity */
2206 have_delegation = nfs_have_delegated_attributes(inode);
2207
2208 /*
2209 * Update the read time so we don't revalidate too often.
2210 */
2211 nfsi->read_cache_jiffies = fattr->time_start;
2212
2213 /* Fix up any delegated attributes in the struct nfs_fattr */
2214 nfs_fattr_fixup_delegated(inode, fattr);
2215
2216 save_cache_validity = nfsi->cache_validity;
2217 nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
2218 | NFS_INO_INVALID_ATIME
2219 | NFS_INO_REVAL_FORCED
2220 | NFS_INO_INVALID_BLOCKS);
2221
2222 /* Do atomic weak cache consistency updates */
2223 nfs_wcc_update_inode(inode, fattr);
2224
2225 if (pnfs_layoutcommit_outstanding(inode)) {
2226 nfsi->cache_validity |=
2227 save_cache_validity &
2228 (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
2229 NFS_INO_INVALID_MTIME | NFS_INO_INVALID_SIZE |
2230 NFS_INO_INVALID_BLOCKS);
2231 cache_revalidated = false;
2232 }
2233
2234 /* More cache consistency checks */
2235 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
2236 if (!have_writers && nfsi->ooo && nfsi->ooo->cnt == 1 &&
2237 nfsi->ooo->gap[0].end == inode_peek_iversion_raw(inode)) {
2238 /* There is one remaining gap that hasn't been
2239 * merged into iversion - do that now.
2240 */
2241 inode_set_iversion_raw(inode, nfsi->ooo->gap[0].start);
2242 kfree(nfsi->ooo);
2243 nfsi->ooo = NULL;
2244 }
2245 if (!inode_eq_iversion_raw(inode, fattr->change_attr)) {
2246 /* Could it be a race with writeback? */
2247 if (!(have_writers || have_delegation)) {
2248 invalid |= NFS_INO_INVALID_DATA
2249 | NFS_INO_INVALID_ACCESS
2250 | NFS_INO_INVALID_ACL
2251 | NFS_INO_INVALID_XATTR;
2252 /* Force revalidate of all attributes */
2253 save_cache_validity |= NFS_INO_INVALID_CTIME
2254 | NFS_INO_INVALID_MTIME
2255 | NFS_INO_INVALID_SIZE
2256 | NFS_INO_INVALID_BLOCKS
2257 | NFS_INO_INVALID_NLINK
2258 | NFS_INO_INVALID_MODE
2259 | NFS_INO_INVALID_OTHER;
2260 if (S_ISDIR(inode->i_mode))
2261 nfs_force_lookup_revalidate(inode);
2262 attr_changed = true;
2263 dprintk("NFS: change_attr change on server for file %s/%ld\n",
2264 inode->i_sb->s_id,
2265 inode->i_ino);
2266 } else if (!have_delegation) {
2267 nfs_ooo_record(nfsi, fattr);
2268 nfs_ooo_merge(nfsi, inode_peek_iversion_raw(inode),
2269 fattr->change_attr);
2270 }
2271 inode_set_iversion_raw(inode, fattr->change_attr);
2272 }
2273 } else {
2274 nfsi->cache_validity |=
2275 save_cache_validity & NFS_INO_INVALID_CHANGE;
2276 if (!have_delegation ||
2277 (nfsi->cache_validity & NFS_INO_INVALID_CHANGE) != 0)
2278 cache_revalidated = false;
2279 }
2280
2281 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
2282 inode_set_mtime_to_ts(inode, fattr->mtime);
2283 else if (fattr_supported & NFS_ATTR_FATTR_MTIME)
2284 nfsi->cache_validity |=
2285 save_cache_validity & NFS_INO_INVALID_MTIME;
2286
2287 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
2288 inode_set_ctime_to_ts(inode, fattr->ctime);
2289 else if (fattr_supported & NFS_ATTR_FATTR_CTIME)
2290 nfsi->cache_validity |=
2291 save_cache_validity & NFS_INO_INVALID_CTIME;
2292
2293 /* Check if our cached file size is stale */
2294 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
2295 new_isize = nfs_size_to_loff_t(fattr->size);
2296 cur_isize = i_size_read(inode);
2297 if (new_isize != cur_isize && !have_delegation) {
2298 /* Do we perhaps have any outstanding writes, or has
2299 * the file grown beyond our last write? */
2300 if (!nfs_have_writebacks(inode) || new_isize > cur_isize) {
2301 trace_nfs_size_update(inode, new_isize);
2302 i_size_write(inode, new_isize);
2303 if (!have_writers)
2304 invalid |= NFS_INO_INVALID_DATA;
2305 }
2306 }
2307 if (new_isize == 0 &&
2308 !(fattr->valid & (NFS_ATTR_FATTR_SPACE_USED |
2309 NFS_ATTR_FATTR_BLOCKS_USED))) {
2310 fattr->du.nfs3.used = 0;
2311 fattr->valid |= NFS_ATTR_FATTR_SPACE_USED;
2312 }
2313 } else
2314 nfsi->cache_validity |=
2315 save_cache_validity & NFS_INO_INVALID_SIZE;
2316
2317 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
2318 inode_set_atime_to_ts(inode, fattr->atime);
2319 else if (fattr_supported & NFS_ATTR_FATTR_ATIME)
2320 nfsi->cache_validity |=
2321 save_cache_validity & NFS_INO_INVALID_ATIME;
2322
2323 if (fattr->valid & NFS_ATTR_FATTR_MODE) {
2324 if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
2325 umode_t newmode = inode->i_mode & S_IFMT;
2326 newmode |= fattr->mode & S_IALLUGO;
2327 inode->i_mode = newmode;
2328 invalid |= NFS_INO_INVALID_ACCESS
2329 | NFS_INO_INVALID_ACL;
2330 }
2331 } else if (fattr_supported & NFS_ATTR_FATTR_MODE)
2332 nfsi->cache_validity |=
2333 save_cache_validity & NFS_INO_INVALID_MODE;
2334
2335 if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
2336 if (!uid_eq(inode->i_uid, fattr->uid)) {
2337 invalid |= NFS_INO_INVALID_ACCESS
2338 | NFS_INO_INVALID_ACL;
2339 inode->i_uid = fattr->uid;
2340 }
2341 } else if (fattr_supported & NFS_ATTR_FATTR_OWNER)
2342 nfsi->cache_validity |=
2343 save_cache_validity & NFS_INO_INVALID_OTHER;
2344
2345 if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
2346 if (!gid_eq(inode->i_gid, fattr->gid)) {
2347 invalid |= NFS_INO_INVALID_ACCESS
2348 | NFS_INO_INVALID_ACL;
2349 inode->i_gid = fattr->gid;
2350 }
2351 } else if (fattr_supported & NFS_ATTR_FATTR_GROUP)
2352 nfsi->cache_validity |=
2353 save_cache_validity & NFS_INO_INVALID_OTHER;
2354
2355 if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
2356 if (inode->i_nlink != fattr->nlink)
2357 set_nlink(inode, fattr->nlink);
2358 } else if (fattr_supported & NFS_ATTR_FATTR_NLINK)
2359 nfsi->cache_validity |=
2360 save_cache_validity & NFS_INO_INVALID_NLINK;
2361
2362 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
2363 /*
2364 * report the blocks in 512-byte units
2365 */
2366 inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
2367 } else if (fattr_supported & NFS_ATTR_FATTR_SPACE_USED)
2368 nfsi->cache_validity |=
2369 save_cache_validity & NFS_INO_INVALID_BLOCKS;
2370
2371 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
2372 inode->i_blocks = fattr->du.nfs2.blocks;
2373 else if (fattr_supported & NFS_ATTR_FATTR_BLOCKS_USED)
2374 nfsi->cache_validity |=
2375 save_cache_validity & NFS_INO_INVALID_BLOCKS;
2376
2377 /* Update attrtimeo value if we're out of the unstable period */
2378 if (attr_changed) {
2379 nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
2380 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
2381 nfsi->attrtimeo_timestamp = now;
2382 /* Set barrier to be more recent than all outstanding updates */
2383 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
2384 } else {
2385 if (cache_revalidated) {
2386 if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
2387 nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
2388 nfsi->attrtimeo <<= 1;
2389 if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
2390 nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
2391 }
2392 nfsi->attrtimeo_timestamp = now;
2393 }
2394 /* Set the barrier to be more recent than this fattr */
2395 if ((long)(fattr->gencount - nfsi->attr_gencount) > 0)
2396 nfsi->attr_gencount = fattr->gencount;
2397 }
2398
2399 /* Don't invalidate the data if we were to blame */
2400 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
2401 || S_ISLNK(inode->i_mode)))
2402 invalid &= ~NFS_INO_INVALID_DATA;
2403 nfs_set_cache_invalid(inode, invalid);
2404
2405 return 0;
2406 out_err:
2407 /*
2408 * No need to worry about unhashing the dentry, as the
2409 * lookup validation will know that the inode is bad.
2410 * (But we fall through to invalidate the caches.)
2411 */
2412 nfs_set_inode_stale_locked(inode);
2413 return -ESTALE;
2414 }
2415
2416 struct inode *nfs_alloc_inode(struct super_block *sb)
2417 {
2418 struct nfs_inode *nfsi;
2419 nfsi = alloc_inode_sb(sb, nfs_inode_cachep, GFP_KERNEL);
2420 if (!nfsi)
2421 return NULL;
2422 nfsi->flags = 0UL;
2423 nfsi->cache_validity = 0UL;
2424 nfsi->ooo = NULL;
2425 #if IS_ENABLED(CONFIG_NFS_V4)
2426 nfsi->nfs4_acl = NULL;
2427 #endif /* CONFIG_NFS_V4 */
2428 #ifdef CONFIG_NFS_V4_2
2429 nfsi->xattr_cache = NULL;
2430 #endif
2431 nfs_netfs_inode_init(nfsi);
2432
2433 return &nfsi->vfs_inode;
2434 }
2435 EXPORT_SYMBOL_GPL(nfs_alloc_inode);
2436
2437 void nfs_free_inode(struct inode *inode)
2438 {
2439 kfree(NFS_I(inode)->ooo);
2440 kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
2441 }
2442 EXPORT_SYMBOL_GPL(nfs_free_inode);
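/*
 * Illustrative sketch (assumed wiring; the real table lives in the NFS
 * super_operations definition elsewhere in the client): these helpers
 * are intended to be plugged into a superblock's s_op so the VFS
 * allocates and frees the larger nfs_inode container:
 *
 *	static const struct super_operations example_sops = {
 *		.alloc_inode	= nfs_alloc_inode,
 *		.free_inode	= nfs_free_inode,
 *		.drop_inode	= nfs_drop_inode,
 *		.evict_inode	= nfs_evict_inode,
 *	};
 */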
2443
2444 static inline void nfs4_init_once(struct nfs_inode *nfsi)
2445 {
2446 #if IS_ENABLED(CONFIG_NFS_V4)
2447 INIT_LIST_HEAD(&nfsi->open_states);
2448 nfsi->delegation = NULL;
2449 init_rwsem(&nfsi->rwsem);
2450 nfsi->layout = NULL;
2451 #endif
2452 }
2453
2454 static void init_once(void *foo)
2455 {
2456 struct nfs_inode *nfsi = foo;
2457
2458 inode_init_once(&nfsi->vfs_inode);
2459 INIT_LIST_HEAD(&nfsi->open_files);
2460 INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
2461 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
2462 nfs4_init_once(nfsi);
2463 }
2464
2465 static int __init nfs_init_inodecache(void)
2466 {
2467 nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
2468 sizeof(struct nfs_inode),
2469 0, (SLAB_RECLAIM_ACCOUNT|
2470 SLAB_ACCOUNT),
2471 init_once);
2472 if (nfs_inode_cachep == NULL)
2473 return -ENOMEM;
2474
2475 return 0;
2476 }
2477
2478 static void nfs_destroy_inodecache(void)
2479 {
2480 /*
2481 * Make sure all delayed rcu free inodes are flushed before we
2482 * destroy cache.
2483 */
2484 rcu_barrier();
2485 kmem_cache_destroy(nfs_inode_cachep);
2486 }
2487
2488 struct workqueue_struct *nfslocaliod_workqueue;
2489 struct workqueue_struct *nfsiod_workqueue;
2490 EXPORT_SYMBOL_GPL(nfsiod_workqueue);
2491
2492 /*
2493 * Destroy the nfsiod workqueues
2494 */
2495 static void nfsiod_stop(void)
2496 {
2497 struct workqueue_struct *wq;
2498
2499 wq = nfsiod_workqueue;
2500 if (wq != NULL) {
2501 nfsiod_workqueue = NULL;
2502 destroy_workqueue(wq);
2503 }
2504 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
2505 wq = nfslocaliod_workqueue;
2506 if (wq != NULL) {
2507 nfslocaliod_workqueue = NULL;
2508 destroy_workqueue(wq);
2509 }
2510 #endif /* CONFIG_NFS_LOCALIO */
2511 }
2512
2513 /*
2514 * Start the nfsiod workqueues
2515 */
2516 static int nfsiod_start(void)
2517 {
2518 dprintk("RPC: creating workqueue nfsiod\n");
2519 nfsiod_workqueue = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
2520 if (nfsiod_workqueue == NULL)
2521 return -ENOMEM;
2522 #if IS_ENABLED(CONFIG_NFS_LOCALIO)
2523 /*
2524 * localio writes need to use a normal (non-memreclaim) workqueue.
2525 * When we start getting low on space, XFS goes and calls flush_work() on
2526 * a non-memreclaim work queue, which causes a priority inversion problem.
2527 */
2528 dprintk("RPC: creating workqueue nfslocaliod\n");
2529 nfslocaliod_workqueue = alloc_workqueue("nfslocaliod", WQ_UNBOUND, 0);
2530 if (unlikely(nfslocaliod_workqueue == NULL)) {
2531 nfsiod_stop();
2532 return -ENOMEM;
2533 }
2534 #endif /* CONFIG_NFS_LOCALIO */
2535 return 0;
2536 }
2537
2538 unsigned int nfs_net_id;
2539 EXPORT_SYMBOL_GPL(nfs_net_id);
2540
2541 static int nfs_net_init(struct net *net)
2542 {
2543 struct nfs_net *nn = net_generic(net, nfs_net_id);
2544
2545 nfs_clients_init(net);
2546
2547 if (!rpc_proc_register(net, &nn->rpcstats)) {
2548 nfs_clients_exit(net);
2549 return -ENOMEM;
2550 }
2551
2552 return nfs_fs_proc_net_init(net);
2553 }
2554
2555 static void nfs_net_exit(struct net *net)
2556 {
2557 rpc_proc_unregister(net, "nfs");
2558 nfs_fs_proc_net_exit(net);
2559 nfs_clients_exit(net);
2560 }
2561
2562 static struct pernet_operations nfs_net_ops = {
2563 .init = nfs_net_init,
2564 .exit = nfs_net_exit,
2565 .id = &nfs_net_id,
2566 .size = sizeof(struct nfs_net),
2567 };
2568
2569 /*
2570 * Initialize NFS
2571 */
2572 static int __init init_nfs_fs(void)
2573 {
2574 int err;
2575
2576 err = nfs_sysfs_init();
2577 if (err < 0)
2578 goto out10;
2579
2580 err = register_pernet_subsys(&nfs_net_ops);
2581 if (err < 0)
2582 goto out9;
2583
2584 err = nfsiod_start();
2585 if (err)
2586 goto out7;
2587
2588 err = nfs_fs_proc_init();
2589 if (err)
2590 goto out6;
2591
2592 err = nfs_init_nfspagecache();
2593 if (err)
2594 goto out5;
2595
2596 err = nfs_init_inodecache();
2597 if (err)
2598 goto out4;
2599
2600 err = nfs_init_readpagecache();
2601 if (err)
2602 goto out3;
2603
2604 err = nfs_init_writepagecache();
2605 if (err)
2606 goto out2;
2607
2608 err = nfs_init_directcache();
2609 if (err)
2610 goto out1;
2611
2612 err = register_nfs_fs();
2613 if (err)
2614 goto out0;
2615
2616 return 0;
2617 out0:
2618 nfs_destroy_directcache();
2619 out1:
2620 nfs_destroy_writepagecache();
2621 out2:
2622 nfs_destroy_readpagecache();
2623 out3:
2624 nfs_destroy_inodecache();
2625 out4:
2626 nfs_destroy_nfspagecache();
2627 out5:
2628 nfs_fs_proc_exit();
2629 out6:
2630 nfsiod_stop();
2631 out7:
2632 unregister_pernet_subsys(&nfs_net_ops);
2633 out9:
2634 nfs_sysfs_exit();
2635 out10:
2636 return err;
2637 }
2638
2639 static void __exit exit_nfs_fs(void)
2640 {
2641 nfs_destroy_directcache();
2642 nfs_destroy_writepagecache();
2643 nfs_destroy_readpagecache();
2644 nfs_destroy_inodecache();
2645 nfs_destroy_nfspagecache();
2646 unregister_pernet_subsys(&nfs_net_ops);
2647 unregister_nfs_fs();
2648 nfs_fs_proc_exit();
2649 nfsiod_stop();
2650 nfs_sysfs_exit();
2651 }
2652
2653 /* Not quite true; I just maintain it */
2654 MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
2655 MODULE_DESCRIPTION("NFS client support");
2656 MODULE_LICENSE("GPL");
2657 module_param(enable_ino64, bool, 0644);
2658
2659 module_init(init_nfs_fs)
2660 module_exit(exit_nfs_fs)
2661