1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
5 */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/bio.h>
10 #include <linux/sched/signal.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/time.h>
23 #include <linux/wait.h>
24 #include <linux/writeback.h>
25 #include <linux/backing-dev.h>
26 #include <linux/kernel.h>
27
28 #include "gfs2.h"
29 #include "incore.h"
30 #include "bmap.h"
31 #include "dir.h"
32 #include "glock.h"
33 #include "glops.h"
34 #include "inode.h"
35 #include "log.h"
36 #include "meta_io.h"
37 #include "quota.h"
38 #include "recovery.h"
39 #include "rgrp.h"
40 #include "super.h"
41 #include "trans.h"
42 #include "util.h"
43 #include "sys.h"
44 #include "xattr.h"
45 #include "lops.h"
46
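/*
 * Disposition of an inode in gfs2_evict_inode(): delete it right away, skip
 * the delete (someone else owns it or the inode has been recreated), or
 * defer the delete to the delete workqueue.
 */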
47 enum evict_behavior {
48 EVICT_SHOULD_DELETE,
49 EVICT_SHOULD_SKIP_DELETE,
50 EVICT_SHOULD_DEFER_DELETE,
51 };
52
53 /**
54 * gfs2_jindex_free - Clear all the journal index information
55 * @sdp: The GFS2 superblock
56 *
57 */
58
59 void gfs2_jindex_free(struct gfs2_sbd *sdp)
60 {
61 struct list_head list;
62 struct gfs2_jdesc *jd;
63
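/*
 * Splice the journal list onto a private list under the spinlock, then
 * free each journal descriptor outside of it.
 */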
64 spin_lock(&sdp->sd_jindex_spin);
65 list_add(&list, &sdp->sd_jindex_list);
66 list_del_init(&sdp->sd_jindex_list);
67 sdp->sd_journals = 0;
68 spin_unlock(&sdp->sd_jindex_spin);
69
70 down_write(&sdp->sd_log_flush_lock);
71 sdp->sd_jdesc = NULL;
72 up_write(&sdp->sd_log_flush_lock);
73
74 while (!list_empty(&list)) {
75 jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
76 BUG_ON(jd->jd_log_bio);
77 gfs2_free_journal_extents(jd);
78 list_del(&jd->jd_list);
79 iput(jd->jd_inode);
80 jd->jd_inode = NULL;
81 kfree(jd);
82 }
83 }
84
85 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
86 {
87 struct gfs2_jdesc *jd;
88
89 list_for_each_entry(jd, head, jd_list) {
90 if (jd->jd_jid == jid)
91 return jd;
92 }
93 return NULL;
94 }
95
96 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
97 {
98 struct gfs2_jdesc *jd;
99
100 spin_lock(&sdp->sd_jindex_spin);
101 jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
102 spin_unlock(&sdp->sd_jindex_spin);
103
104 return jd;
105 }
106
107 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
108 {
109 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
110 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
111 u64 size = i_size_read(jd->jd_inode);
112
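/* A journal must be at least 8 MiB and at most 1 GiB (BIT(30)) in size. */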
113 if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
114 return -EIO;
115
116 jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
117
118 if (gfs2_write_alloc_required(ip, 0, size)) {
119 gfs2_consist_inode(ip);
120 return -EIO;
121 }
122
123 return 0;
124 }
125
126 /**
127 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
128 * @sdp: the filesystem
129 *
130 * Returns: errno
131 */
132
133 int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
134 {
135 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
136 struct gfs2_glock *j_gl = ip->i_gl;
137 int error;
138
139 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
140 if (gfs2_withdrawing_or_withdrawn(sdp))
141 return -EIO;
142
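/*
 * sd_log_sequence is only set once our own journal head has been found,
 * so a value of zero means the state of our journal is unknown.
 */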
143 if (sdp->sd_log_sequence == 0) {
144 fs_err(sdp, "unknown status of our own journal jid %d",
145 sdp->sd_lockstruct.ls_jid);
146 return -EIO;
147 }
148
149 error = gfs2_quota_init(sdp);
150 if (!error && gfs2_withdrawing_or_withdrawn(sdp))
151 error = -EIO;
152 if (!error)
153 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
154 return error;
155 }
156
157 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
158 {
159 const struct gfs2_statfs_change *str = buf;
160
161 sc->sc_total = be64_to_cpu(str->sc_total);
162 sc->sc_free = be64_to_cpu(str->sc_free);
163 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
164 }
165
166 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
167 {
168 struct gfs2_statfs_change *str = buf;
169
170 str->sc_total = cpu_to_be64(sc->sc_total);
171 str->sc_free = cpu_to_be64(sc->sc_free);
172 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
173 }
174
175 int gfs2_statfs_init(struct gfs2_sbd *sdp)
176 {
177 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
178 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
179 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
180 struct buffer_head *m_bh;
181 struct gfs2_holder gh;
182 int error;
183
184 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
185 &gh);
186 if (error)
187 return error;
188
189 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
190 if (error)
191 goto out;
192
193 if (sdp->sd_args.ar_spectator) {
194 spin_lock(&sdp->sd_statfs_spin);
195 gfs2_statfs_change_in(m_sc, m_bh->b_data +
196 sizeof(struct gfs2_dinode));
197 spin_unlock(&sdp->sd_statfs_spin);
198 } else {
199 spin_lock(&sdp->sd_statfs_spin);
200 gfs2_statfs_change_in(m_sc, m_bh->b_data +
201 sizeof(struct gfs2_dinode));
202 gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
203 sizeof(struct gfs2_dinode));
204 spin_unlock(&sdp->sd_statfs_spin);
205
206 }
207
208 brelse(m_bh);
209 out:
210 gfs2_glock_dq_uninit(&gh);
211 return 0;
212 }
213
214 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
215 s64 dinodes)
216 {
217 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
218 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
219 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
220 s64 x, y;
221 int need_sync = 0;
222
223 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
224
225 spin_lock(&sdp->sd_statfs_spin);
226 l_sc->sc_total += total;
227 l_sc->sc_free += free;
228 l_sc->sc_dinodes += dinodes;
229 gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
230 sizeof(struct gfs2_dinode));
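/*
 * With the "statfs_percent" mount option, trigger a sync of the local
 * statfs changes into the master file once the accumulated change in
 * free blocks exceeds the given percentage of the master free count.
 */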
231 if (sdp->sd_args.ar_statfs_percent) {
232 x = 100 * l_sc->sc_free;
233 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
234 if (x >= y || x <= -y)
235 need_sync = 1;
236 }
237 spin_unlock(&sdp->sd_statfs_spin);
238
239 if (need_sync)
240 gfs2_wake_up_statfs(sdp);
241 }
242
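/*
 * Fold the accumulated local statfs changes into the master statfs file
 * and reset the local change file.  Called with an active transaction.
 */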
243 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
244 {
245 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
246 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
247 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
248 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
249
250 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
251 gfs2_trans_add_meta(m_ip->i_gl, m_bh);
252
253 spin_lock(&sdp->sd_statfs_spin);
254 m_sc->sc_total += l_sc->sc_total;
255 m_sc->sc_free += l_sc->sc_free;
256 m_sc->sc_dinodes += l_sc->sc_dinodes;
257 memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
258 memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
259 0, sizeof(struct gfs2_statfs_change));
260 gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
261 spin_unlock(&sdp->sd_statfs_spin);
262 }
263
264 int gfs2_statfs_sync(struct super_block *sb, int type)
265 {
266 struct gfs2_sbd *sdp = sb->s_fs_info;
267 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
268 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
269 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
270 struct gfs2_holder gh;
271 struct buffer_head *m_bh;
272 int error;
273
274 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
275 &gh);
276 if (error)
277 goto out;
278
279 error = gfs2_meta_inode_buffer(m_ip, &m_bh);
280 if (error)
281 goto out_unlock;
282
283 spin_lock(&sdp->sd_statfs_spin);
284 gfs2_statfs_change_in(m_sc, m_bh->b_data +
285 sizeof(struct gfs2_dinode));
286 if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
287 spin_unlock(&sdp->sd_statfs_spin);
288 goto out_bh;
289 }
290 spin_unlock(&sdp->sd_statfs_spin);
291
292 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
293 if (error)
294 goto out_bh;
295
296 update_statfs(sdp, m_bh);
297 sdp->sd_statfs_force_sync = 0;
298
299 gfs2_trans_end(sdp);
300
301 out_bh:
302 brelse(m_bh);
303 out_unlock:
304 gfs2_glock_dq_uninit(&gh);
305 out:
306 return error;
307 }
308
309 struct lfcc {
310 struct list_head list;
311 struct gfs2_holder gh;
312 };
313
314 /**
315 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
316 * journals are clean
317 * @sdp: the file system
318 *
319 * Returns: errno
320 */
321
322 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
323 {
324 struct gfs2_inode *ip;
325 struct gfs2_jdesc *jd;
326 struct lfcc *lfcc;
327 LIST_HEAD(list);
328 struct gfs2_log_header_host lh;
329 int error, error2;
330
331 /*
332 * Grab all the journal glocks in SH mode. We are *probably* doing
333 * that to prevent recovery.
334 */
335
336 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
337 lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
338 if (!lfcc) {
339 error = -ENOMEM;
340 goto out;
341 }
342 ip = GFS2_I(jd->jd_inode);
343 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
344 if (error) {
345 kfree(lfcc);
346 goto out;
347 }
348 list_add(&lfcc->list, &list);
349 }
350
351 gfs2_freeze_unlock(sdp);
352
353 error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
354 LM_FLAG_NOEXP | GL_NOPID,
355 &sdp->sd_freeze_gh);
356 if (error)
357 goto relock_shared;
358
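/*
 * With the freeze glock now held exclusively, verify that every journal
 * was shut down cleanly (its log head carries GFS2_LOG_HEAD_UNMOUNT).
 */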
359 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
360 error = gfs2_jdesc_check(jd);
361 if (error)
362 break;
363 error = gfs2_find_jhead(jd, &lh);
364 if (error)
365 break;
366 if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
367 error = -EBUSY;
368 break;
369 }
370 }
371
372 if (!error)
373 goto out; /* success */
374
375 gfs2_freeze_unlock(sdp);
376
377 relock_shared:
378 error2 = gfs2_freeze_lock_shared(sdp);
379 gfs2_assert_withdraw(sdp, !error2);
380
381 out:
382 while (!list_empty(&list)) {
383 lfcc = list_first_entry(&list, struct lfcc, list);
384 list_del(&lfcc->list);
385 gfs2_glock_dq_uninit(&lfcc->gh);
386 kfree(lfcc);
387 }
388 return error;
389 }
390
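/*
 * Serialize the in-core inode into its on-disk dinode form; all on-disk
 * fields are big-endian.
 */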
391 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
392 {
393 const struct inode *inode = &ip->i_inode;
394 struct gfs2_dinode *str = buf;
395
396 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
397 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
398 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
399 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
400 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
401 str->di_mode = cpu_to_be32(inode->i_mode);
402 str->di_uid = cpu_to_be32(i_uid_read(inode));
403 str->di_gid = cpu_to_be32(i_gid_read(inode));
404 str->di_nlink = cpu_to_be32(inode->i_nlink);
405 str->di_size = cpu_to_be64(i_size_read(inode));
406 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
407 str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
408 str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
409 str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));
410
411 str->di_goal_meta = cpu_to_be64(ip->i_goal);
412 str->di_goal_data = cpu_to_be64(ip->i_goal);
413 str->di_generation = cpu_to_be64(ip->i_generation);
414
415 str->di_flags = cpu_to_be32(ip->i_diskflags);
416 str->di_height = cpu_to_be16(ip->i_height);
417 str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
418 !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
419 GFS2_FORMAT_DE : 0);
420 str->di_depth = cpu_to_be16(ip->i_depth);
421 str->di_entries = cpu_to_be32(ip->i_entries);
422
423 str->di_eattr = cpu_to_be64(ip->i_eattr);
424 str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
425 str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
426 str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
427 }
428
429 /**
430 * gfs2_write_inode - Make sure the inode is stable on the disk
431 * @inode: The inode
432 * @wbc: The writeback control structure
433 *
434 * Returns: errno
435 */
436
437 static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
438 {
439 struct gfs2_inode *ip = GFS2_I(inode);
440 struct gfs2_sbd *sdp = GFS2_SB(inode);
441 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
442 struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
443 int ret = 0;
444 bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
445
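/*
 * For WB_SYNC_ALL writeback and for data-journaled inodes, flush the log
 * first so the inode's metadata is safely in the journal before we write
 * and wait on the metadata address space.
 */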
446 if (flush_all)
447 gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
448 GFS2_LOG_HEAD_FLUSH_NORMAL |
449 GFS2_LFC_WRITE_INODE);
450 if (bdi->wb.dirty_exceeded)
451 gfs2_ail1_flush(sdp, wbc);
452 else
453 filemap_fdatawrite(metamapping);
454 if (flush_all)
455 ret = filemap_fdatawait(metamapping);
456 if (ret)
457 mark_inode_dirty_sync(inode);
458 else {
459 spin_lock(&inode->i_lock);
460 if (!(inode->i_flags & I_DIRTY))
461 gfs2_ordered_del_inode(ip);
462 spin_unlock(&inode->i_lock);
463 }
464 return ret;
465 }
466
467 /**
468 * gfs2_dirty_inode - check for atime updates
469 * @inode: The inode in question
470 * @flags: The type of dirty
471 *
472 * Unfortunately it can be called under any combination of inode
473 * glock and freeze glock, so we have to check carefully.
474 *
475 * At the moment this deals only with atime - it should be possible
476 * to expand that role in future, once a review of the locking has
477 * been carried out.
478 */
479
480 static void gfs2_dirty_inode(struct inode *inode, int flags)
481 {
482 struct gfs2_inode *ip = GFS2_I(inode);
483 struct gfs2_sbd *sdp = GFS2_SB(inode);
484 struct buffer_head *bh;
485 struct gfs2_holder gh;
486 int need_unlock = 0;
487 int need_endtrans = 0;
488 int ret;
489
490 /* This can only happen during incomplete inode creation. */
491 if (unlikely(!ip->i_gl))
492 return;
493
494 if (gfs2_withdrawing_or_withdrawn(sdp))
495 return;
496 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
497 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
498 if (ret) {
499 fs_err(sdp, "dirty_inode: glock %d\n", ret);
500 gfs2_dump_glock(NULL, ip->i_gl, true);
501 return;
502 }
503 need_unlock = 1;
504 } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
505 return;
506
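/*
 * If no transaction is running in this context, open a small one so the
 * dinode update can be journaled.
 */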
507 if (current->journal_info == NULL) {
508 ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
509 if (ret) {
510 fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
511 goto out;
512 }
513 need_endtrans = 1;
514 }
515
516 ret = gfs2_meta_inode_buffer(ip, &bh);
517 if (ret == 0) {
518 gfs2_trans_add_meta(ip->i_gl, bh);
519 gfs2_dinode_out(ip, bh->b_data);
520 brelse(bh);
521 }
522
523 if (need_endtrans)
524 gfs2_trans_end(sdp);
525 out:
526 if (need_unlock)
527 gfs2_glock_dq_uninit(&gh);
528 }
529
530 /**
531 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
532 * @sdp: the filesystem
533 *
534 * Returns: errno
535 */
536
537 void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
538 {
539 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
540
541 if (!test_bit(SDF_KILL, &sdp->sd_flags))
542 gfs2_flush_delete_work(sdp);
543
544 gfs2_destroy_threads(sdp);
545
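/*
 * Only sync quotas and statfs and flush the log if the journal is still
 * live; after a withdraw, no further log writes are allowed.
 */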
546 if (log_write_allowed) {
547 gfs2_quota_sync(sdp->sd_vfs, 0);
548 gfs2_statfs_sync(sdp->sd_vfs, 0);
549
550 /* We do two log flushes here. The first one commits dirty inodes
551 * and rgrps to the journal, but queues up revokes to the ail list.
552 * The second flush writes out and removes the revokes.
553 *
554 * The first must be done before the FLUSH_SHUTDOWN code
555 * clears the LIVE flag, otherwise it will not be able to start
556 * a transaction to write its revokes, and the error will cause
557 * a withdraw of the file system. */
558 gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO);
559 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
560 GFS2_LFC_MAKE_FS_RO);
561 wait_event_timeout(sdp->sd_log_waitq,
562 gfs2_log_is_empty(sdp),
563 HZ * 5);
564 gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
565 }
566 gfs2_quota_cleanup(sdp);
567 }
568
569 /**
570 * gfs2_put_super - Unmount the filesystem
571 * @sb: The VFS superblock
572 *
573 */
574
575 static void gfs2_put_super(struct super_block *sb)
576 {
577 struct gfs2_sbd *sdp = sb->s_fs_info;
578 struct gfs2_jdesc *jd;
579
580 /* No more recovery requests */
581 set_bit(SDF_NORECOVERY, &sdp->sd_flags);
582 smp_mb();
583
584 /* Wait on outstanding recovery */
585 restart:
586 spin_lock(&sdp->sd_jindex_spin);
587 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
588 if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
589 continue;
590 spin_unlock(&sdp->sd_jindex_spin);
591 wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
592 TASK_UNINTERRUPTIBLE);
593 goto restart;
594 }
595 spin_unlock(&sdp->sd_jindex_spin);
596
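/*
 * A read-write mount has to be brought down to read-only first, which
 * flushes the log and stops the daemons; a read-only (or withdrawn)
 * mount only needs its threads and quota state torn down.
 */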
597 if (!sb_rdonly(sb))
598 gfs2_make_fs_ro(sdp);
599 else {
600 if (gfs2_withdrawing_or_withdrawn(sdp))
601 gfs2_destroy_threads(sdp);
602
603 gfs2_quota_cleanup(sdp);
604 }
605
606 WARN_ON(gfs2_withdrawing(sdp));
607
608 /* At this point, we're through modifying the disk */
609
610 /* Release stuff */
611
612 gfs2_freeze_unlock(sdp);
613
614 iput(sdp->sd_jindex);
615 iput(sdp->sd_statfs_inode);
616 iput(sdp->sd_rindex);
617 iput(sdp->sd_quota_inode);
618
619 gfs2_glock_put(sdp->sd_rename_gl);
620 gfs2_glock_put(sdp->sd_freeze_gl);
621
622 if (!sdp->sd_args.ar_spectator) {
623 if (gfs2_holder_initialized(&sdp->sd_journal_gh))
624 gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
625 if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
626 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
627 brelse(sdp->sd_sc_bh);
628 gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
629 gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
630 free_local_statfs_inodes(sdp);
631 iput(sdp->sd_qc_inode);
632 }
633
634 gfs2_glock_dq_uninit(&sdp->sd_live_gh);
635 gfs2_clear_rgrpd(sdp);
636 gfs2_jindex_free(sdp);
637 /* Take apart glock structures and buffer lists */
638 gfs2_gl_hash_clear(sdp);
639 iput(sdp->sd_inode);
640 gfs2_delete_debugfs_file(sdp);
641
642 gfs2_sys_fs_del(sdp);
643 free_sbd(sdp);
644 }
645
646 /**
647 * gfs2_sync_fs - sync the filesystem
648 * @sb: the superblock
649 * @wait: true to wait for completion
650 *
651 * Flushes the log to disk.
652 */
653
654 static int gfs2_sync_fs(struct super_block *sb, int wait)
655 {
656 struct gfs2_sbd *sdp = sb->s_fs_info;
657
658 gfs2_quota_sync(sb, -1);
659 if (wait)
660 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
661 GFS2_LFC_SYNC_FS);
662 return sdp->sd_log_error;
663 }
664
665 static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner)
666 {
667 struct super_block *sb = sdp->sd_vfs;
668 int error;
669
670 error = gfs2_freeze_lock_shared(sdp);
671 if (error)
672 goto fail;
673 error = thaw_super(sb, who, freeze_owner);
674 if (!error)
675 return 0;
676
677 fail:
678 fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error);
679 gfs2_assert_withdraw(sdp, 0);
680 return error;
681 }
682
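/*
 * Freeze worker, queued when another node initiates a cluster-wide freeze
 * and our freeze glock gets demoted.  Freeze the local super block, drop
 * our shared hold on the freeze glock, and thaw again; gfs2_do_thaw()
 * blocks on re-acquiring the freeze glock until the initiator releases it.
 */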
683 void gfs2_freeze_func(struct work_struct *work)
684 {
685 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
686 struct super_block *sb = sdp->sd_vfs;
687 int error;
688
689 mutex_lock(&sdp->sd_freeze_mutex);
690 error = -EBUSY;
691 if (test_bit(SDF_FROZEN, &sdp->sd_flags))
692 goto freeze_failed;
693
694 error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL);
695 if (error)
696 goto freeze_failed;
697
698 gfs2_freeze_unlock(sdp);
699 set_bit(SDF_FROZEN, &sdp->sd_flags);
700
701 error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);
702 if (error)
703 goto out;
704
705 clear_bit(SDF_FROZEN, &sdp->sd_flags);
706 goto out;
707
708 freeze_failed:
709 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error);
710
711 out:
712 mutex_unlock(&sdp->sd_freeze_mutex);
713 deactivate_super(sb);
714 }
715
716 /**
717 * gfs2_freeze_super - prevent further writes to the filesystem
718 * @sb: the VFS structure for the filesystem
719 * @who: freeze flags
720 * @freeze_owner: owner of the freeze
721 *
722 */
723
724 static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who,
725 const void *freeze_owner)
726 {
727 struct gfs2_sbd *sdp = sb->s_fs_info;
728 int error;
729
730 if (!mutex_trylock(&sdp->sd_freeze_mutex))
731 return -EBUSY;
732 if (test_bit(SDF_FROZEN, &sdp->sd_flags)) {
733 mutex_unlock(&sdp->sd_freeze_mutex);
734 return -EBUSY;
735 }
736
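/*
 * Freeze at the VFS level first, then try to take the freeze glock
 * exclusively and check that all journals are clean.  If that fails
 * (for example while recovery is running), thaw and retry.
 */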
737 for (;;) {
738 error = freeze_super(sb, who, freeze_owner);
739 if (error) {
740 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
741 error);
742 goto out;
743 }
744
745 error = gfs2_lock_fs_check_clean(sdp);
746 if (!error) {
747 set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
748 set_bit(SDF_FROZEN, &sdp->sd_flags);
749 break;
750 }
751
752 error = gfs2_do_thaw(sdp, who, freeze_owner);
753 if (error)
754 goto out;
755
756 if (error == -EBUSY)
757 fs_err(sdp, "waiting for recovery before freeze\n");
758 else if (error == -EIO) {
759 fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
760 "to recovery error.\n");
761 goto out;
762 } else {
763 fs_err(sdp, "error freezing FS: %d\n", error);
764 }
765 fs_err(sdp, "retrying...\n");
766 msleep(1000);
767 }
768
769 out:
770 mutex_unlock(&sdp->sd_freeze_mutex);
771 return error;
772 }
773
774 static int gfs2_freeze_fs(struct super_block *sb)
775 {
776 struct gfs2_sbd *sdp = sb->s_fs_info;
777
778 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
779 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
780 GFS2_LFC_FREEZE_GO_SYNC);
781 if (gfs2_withdrawing_or_withdrawn(sdp))
782 return -EIO;
783 }
784 return 0;
785 }
786
787 /**
788 * gfs2_thaw_super - reallow writes to the filesystem
789 * @sb: the VFS structure for the filesystem
790 * @who: freeze flags
791 * @freeze_owner: owner of the freeze
792 *
793 */
794
795 static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who,
796 const void *freeze_owner)
797 {
798 struct gfs2_sbd *sdp = sb->s_fs_info;
799 int error;
800
801 if (!mutex_trylock(&sdp->sd_freeze_mutex))
802 return -EBUSY;
803 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
804 mutex_unlock(&sdp->sd_freeze_mutex);
805 return -EINVAL;
806 }
807
808 atomic_inc(&sb->s_active);
809 gfs2_freeze_unlock(sdp);
810
811 error = gfs2_do_thaw(sdp, who, freeze_owner);
812
813 if (!error) {
814 clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
815 clear_bit(SDF_FROZEN, &sdp->sd_flags);
816 }
817 mutex_unlock(&sdp->sd_freeze_mutex);
818 deactivate_super(sb);
819 return error;
820 }
821
822 void gfs2_thaw_freeze_initiator(struct super_block *sb)
823 {
824 struct gfs2_sbd *sdp = sb->s_fs_info;
825
826 mutex_lock(&sdp->sd_freeze_mutex);
827 if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
828 goto out;
829
830 gfs2_freeze_unlock(sdp);
831
832 out:
833 mutex_unlock(&sdp->sd_freeze_mutex);
834 }
835
836 /**
837 * statfs_slow_fill - fill in the sc for a given RG
838 * @rgd: the RG
839 * @sc: the sc structure
840 *
841 * Returns: 0 on success, -ESTALE if the LVB is invalid
842 */
843
844 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
845 struct gfs2_statfs_change_host *sc)
846 {
847 gfs2_rgrp_verify(rgd);
848 sc->sc_total += rgd->rd_data;
849 sc->sc_free += rgd->rd_free;
850 sc->sc_dinodes += rgd->rd_dinodes;
851 return 0;
852 }
853
854 /**
855 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
856 * @sdp: the filesystem
857 * @sc: the sc info that will be returned
858 *
859 * Any error (other than a signal) will cause this routine to fall back
860 * to the synchronous version.
861 *
862 * FIXME: This really shouldn't busy wait like this.
863 *
864 * Returns: errno
865 */
866
867 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
868 {
869 struct gfs2_rgrpd *rgd_next;
870 struct gfs2_holder *gha, *gh;
871 unsigned int slots = 64;
872 unsigned int x;
873 int done;
874 int error = 0, err;
875
876 memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
877 gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
878 if (!gha)
879 return -ENOMEM;
880 for (x = 0; x < slots; x++)
881 gfs2_holder_mark_uninitialized(gha + x);
882
883 rgd_next = gfs2_rgrpd_get_first(sdp);
884
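/*
 * Keep a window of up to "slots" asynchronous glock requests in flight,
 * walking all resource groups and adding up their counts as the shared
 * locks are granted.
 */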
885 for (;;) {
886 done = 1;
887
888 for (x = 0; x < slots; x++) {
889 gh = gha + x;
890
891 if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
892 err = gfs2_glock_wait(gh);
893 if (err) {
894 gfs2_holder_uninit(gh);
895 error = err;
896 } else {
897 if (!error) {
898 struct gfs2_rgrpd *rgd =
899 gfs2_glock2rgrp(gh->gh_gl);
900
901 error = statfs_slow_fill(rgd, sc);
902 }
903 gfs2_glock_dq_uninit(gh);
904 }
905 }
906
907 if (gfs2_holder_initialized(gh))
908 done = 0;
909 else if (rgd_next && !error) {
910 error = gfs2_glock_nq_init(rgd_next->rd_gl,
911 LM_ST_SHARED,
912 GL_ASYNC,
913 gh);
914 rgd_next = gfs2_rgrpd_get_next(rgd_next);
915 done = 0;
916 }
917
918 if (signal_pending(current))
919 error = -ERESTARTSYS;
920 }
921
922 if (done)
923 break;
924
925 yield();
926 }
927
928 kfree(gha);
929 return error;
930 }
931
932 /**
933 * gfs2_statfs_i - Do a statfs
934 * @sdp: the filesystem
935 * @sc: the sc structure
936 *
937 * Returns: errno
938 */
939
940 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
941 {
942 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
943 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
944
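/*
 * Combine the master counters with our not-yet-synced local changes and
 * clamp the result to sane values.
 */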
945 spin_lock(&sdp->sd_statfs_spin);
946
947 *sc = *m_sc;
948 sc->sc_total += l_sc->sc_total;
949 sc->sc_free += l_sc->sc_free;
950 sc->sc_dinodes += l_sc->sc_dinodes;
951
952 spin_unlock(&sdp->sd_statfs_spin);
953
954 if (sc->sc_free < 0)
955 sc->sc_free = 0;
956 if (sc->sc_free > sc->sc_total)
957 sc->sc_free = sc->sc_total;
958 if (sc->sc_dinodes < 0)
959 sc->sc_dinodes = 0;
960
961 return 0;
962 }
963
964 /**
965 * gfs2_statfs - Gather and return stats about the filesystem
966 * @dentry: The name of the link
967 * @buf: The buffer
968 *
969 * Returns: 0 on success or error code
970 */
971
972 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
973 {
974 struct super_block *sb = dentry->d_sb;
975 struct gfs2_sbd *sdp = sb->s_fs_info;
976 struct gfs2_statfs_change_host sc;
977 int error;
978
979 error = gfs2_rindex_update(sdp);
980 if (error)
981 return error;
982
983 if (gfs2_tune_get(sdp, gt_statfs_slow))
984 error = gfs2_statfs_slow(sdp, &sc);
985 else
986 error = gfs2_statfs_i(sdp, &sc);
987
988 if (error)
989 return error;
990
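/*
 * GFS2 has no fixed inode table: dinodes are allocated from ordinary
 * blocks, so f_files and f_ffree are derived from the block counts.
 */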
991 buf->f_type = GFS2_MAGIC;
992 buf->f_bsize = sdp->sd_sb.sb_bsize;
993 buf->f_blocks = sc.sc_total;
994 buf->f_bfree = sc.sc_free;
995 buf->f_bavail = sc.sc_free;
996 buf->f_files = sc.sc_dinodes + sc.sc_free;
997 buf->f_ffree = sc.sc_free;
998 buf->f_namelen = GFS2_FNAMESIZE;
999 buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
1000
1001 return 0;
1002 }
1003
1004 /**
1005 * gfs2_drop_inode - Drop an inode (test for remote unlink)
1006 * @inode: The inode to drop
1007 *
1008 * If we've received a callback on an iopen lock then it's because a
1009 * remote node tried to deallocate the inode but failed due to this node
1010 * still having the inode open. Here we mark the link count zero
1011 * since we know that it must have reached zero if the GLF_DEMOTE flag
1012 * is set on the iopen glock. If we didn't do a disk read since the
1013 * remote node removed the final link then we might otherwise miss
1014 * this event. This check ensures that this node will deallocate the
1015 * inode's blocks, or alternatively pass the baton on to another
1016 * node for later deallocation.
1017 */
1018
1019 static int gfs2_drop_inode(struct inode *inode)
1020 {
1021 struct gfs2_inode *ip = GFS2_I(inode);
1022 struct gfs2_sbd *sdp = GFS2_SB(inode);
1023
1024 if (inode->i_nlink &&
1025 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1026 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1027 if (glock_needs_demote(gl))
1028 clear_nlink(inode);
1029 }
1030
1031 /*
1032 * When an inode's link count has dropped to zero while we are under
1033 * memory pressure, defer deleting the inode to the delete workqueue.
1034 * This avoids calling into DLM under memory pressure, which can deadlock.
1035 */
1036 if (!inode->i_nlink &&
1037 unlikely(current->flags & PF_MEMALLOC) &&
1038 gfs2_holder_initialized(&ip->i_iopen_gh)) {
1039 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1040
1041 gfs2_glock_hold(gl);
1042 if (!gfs2_queue_verify_delete(gl, true))
1043 gfs2_glock_put_async(gl);
1044 return 0;
1045 }
1046
1047 /*
1048 * No longer cache inodes when trying to evict them all.
1049 */
1050 if (test_bit(SDF_EVICTING, &sdp->sd_flags))
1051 return 1;
1052
1053 return generic_drop_inode(inode);
1054 }
1055
1056 /**
1057 * gfs2_show_options - Show mount options for /proc/mounts
1058 * @s: seq_file structure
1059 * @root: root of this (sub)tree
1060 *
1061 * Returns: 0 on success or error code
1062 */
1063
1064 static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1065 {
1066 struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1067 struct gfs2_args *args = &sdp->sd_args;
1068 unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
1069
1070 spin_lock(&sdp->sd_tune.gt_spin);
1071 logd_secs = sdp->sd_tune.gt_logd_secs;
1072 quota_quantum = sdp->sd_tune.gt_quota_quantum;
1073 statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
1074 statfs_slow = sdp->sd_tune.gt_statfs_slow;
1075 spin_unlock(&sdp->sd_tune.gt_spin);
1076
1077 if (is_subdir(root, sdp->sd_master_dir))
1078 seq_puts(s, ",meta");
1079 if (args->ar_lockproto[0])
1080 seq_show_option(s, "lockproto", args->ar_lockproto);
1081 if (args->ar_locktable[0])
1082 seq_show_option(s, "locktable", args->ar_locktable);
1083 if (args->ar_hostdata[0])
1084 seq_show_option(s, "hostdata", args->ar_hostdata);
1085 if (args->ar_spectator)
1086 seq_puts(s, ",spectator");
1087 if (args->ar_localflocks)
1088 seq_puts(s, ",localflocks");
1089 if (args->ar_debug)
1090 seq_puts(s, ",debug");
1091 if (args->ar_posix_acl)
1092 seq_puts(s, ",acl");
1093 if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1094 char *state;
1095 switch (args->ar_quota) {
1096 case GFS2_QUOTA_OFF:
1097 state = "off";
1098 break;
1099 case GFS2_QUOTA_ACCOUNT:
1100 state = "account";
1101 break;
1102 case GFS2_QUOTA_ON:
1103 state = "on";
1104 break;
1105 case GFS2_QUOTA_QUIET:
1106 state = "quiet";
1107 break;
1108 default:
1109 state = "unknown";
1110 break;
1111 }
1112 seq_printf(s, ",quota=%s", state);
1113 }
1114 if (args->ar_suiddir)
1115 seq_puts(s, ",suiddir");
1116 if (args->ar_data != GFS2_DATA_DEFAULT) {
1117 char *state;
1118 switch (args->ar_data) {
1119 case GFS2_DATA_WRITEBACK:
1120 state = "writeback";
1121 break;
1122 case GFS2_DATA_ORDERED:
1123 state = "ordered";
1124 break;
1125 default:
1126 state = "unknown";
1127 break;
1128 }
1129 seq_printf(s, ",data=%s", state);
1130 }
1131 if (args->ar_discard)
1132 seq_puts(s, ",discard");
1133 if (logd_secs != 30)
1134 seq_printf(s, ",commit=%d", logd_secs);
1135 if (statfs_quantum != 30)
1136 seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
1137 else if (statfs_slow)
1138 seq_puts(s, ",statfs_quantum=0");
1139 if (quota_quantum != 60)
1140 seq_printf(s, ",quota_quantum=%d", quota_quantum);
1141 if (args->ar_statfs_percent)
1142 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1143 if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1144 const char *state;
1145
1146 switch (args->ar_errors) {
1147 case GFS2_ERRORS_WITHDRAW:
1148 state = "withdraw";
1149 break;
1150 case GFS2_ERRORS_PANIC:
1151 state = "panic";
1152 break;
1153 default:
1154 state = "unknown";
1155 break;
1156 }
1157 seq_printf(s, ",errors=%s", state);
1158 }
1159 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1160 seq_puts(s, ",nobarrier");
1161 if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1162 seq_puts(s, ",demote_interface_used");
1163 if (args->ar_rgrplvb)
1164 seq_puts(s, ",rgrplvb");
1165 if (args->ar_loccookie)
1166 seq_puts(s, ",loccookie");
1167 return 0;
1168 }
1169
1170 /**
1171 * gfs2_glock_put_eventually
1172 * @gl: The glock to put
1173 *
1174 * When under memory pressure, trigger a deferred glock put to make sure we
1175 * won't call into DLM and deadlock. Otherwise, put the glock directly.
1176 */
1177
1178 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1179 {
1180 if (current->flags & PF_MEMALLOC)
1181 gfs2_glock_put_async(gl);
1182 else
1183 gfs2_glock_put(gl);
1184 }
1185
1186 static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)
1187 {
1188 struct gfs2_inode *ip = GFS2_I(inode);
1189 struct gfs2_sbd *sdp = GFS2_SB(inode);
1190 struct gfs2_holder *gh = &ip->i_iopen_gh;
1191 int error;
1192
1193 gh->gh_flags |= GL_NOCACHE;
1194 gfs2_glock_dq_wait(gh);
1195
1196 /*
1197 * If there are no other lock holders, we will immediately get
1198 * exclusive access to the iopen glock here.
1199 *
1200 * Otherwise, the other nodes holding the lock will be notified about
1201 * our locking request (see iopen_go_callback()). If they do not have
1202 * the inode open, they are expected to evict the cached inode and
1203 * release the lock, allowing us to proceed.
1204 *
1205 * Otherwise, if they cannot evict the inode, they are expected to poke
1206 * the inode glock (note: not the iopen glock). We will notice that
1207 * and stop waiting for the iopen glock immediately. The other node(s)
1208 * are then expected to take care of deleting the inode when they no
1209 * longer use it.
1210 *
1211 * As a last resort, if another node keeps holding the iopen glock
1212 * without showing any activity on the inode glock, we will eventually
1213 * time out and fail the iopen glock upgrade.
1214 */
1215
1216 gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
1217 error = gfs2_glock_nq(gh);
1218 if (error)
1219 return EVICT_SHOULD_SKIP_DELETE;
1220
1221 wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
1222 !test_bit(HIF_WAIT, &gh->gh_iflags) ||
1223 glock_needs_demote(ip->i_gl),
1224 5 * HZ);
1225 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1226 gfs2_glock_dq(gh);
1227 if (glock_needs_demote(ip->i_gl))
1228 return EVICT_SHOULD_SKIP_DELETE;
1229 return EVICT_SHOULD_DEFER_DELETE;
1230 }
1231 error = gfs2_glock_holder_ready(gh);
1232 if (error)
1233 return EVICT_SHOULD_SKIP_DELETE;
1234 return EVICT_SHOULD_DELETE;
1235 }
1236
1237 /**
1238 * evict_should_delete - determine whether the inode is eligible for deletion
1239 * @inode: The inode to evict
1240 * @gh: The glock holder structure
1241 *
1242 * This function determines whether the evicted inode is eligible to be deleted
1243 * and locks the inode glock.
1244 *
1245 * Returns: the fate of the dinode
1246 */
1247 static enum evict_behavior evict_should_delete(struct inode *inode,
1248 struct gfs2_holder *gh)
1249 {
1250 struct gfs2_inode *ip = GFS2_I(inode);
1251 struct super_block *sb = inode->i_sb;
1252 struct gfs2_sbd *sdp = sb->s_fs_info;
1253 int ret;
1254
1255 if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1256 test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags))
1257 return EVICT_SHOULD_DEFER_DELETE;
1258
1259 /* Deletes should never happen under memory pressure anymore. */
1260 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1261 return EVICT_SHOULD_DEFER_DELETE;
1262
1263 /* Must not read inode block until block type has been verified */
1264 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
1265 if (unlikely(ret))
1266 return EVICT_SHOULD_SKIP_DELETE;
1267
1268 if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
1269 return EVICT_SHOULD_SKIP_DELETE;
1270 ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1271 if (ret)
1272 return EVICT_SHOULD_SKIP_DELETE;
1273
1274 ret = gfs2_instantiate(gh);
1275 if (ret)
1276 return EVICT_SHOULD_SKIP_DELETE;
1277
1278 /*
1279 * The inode may have been recreated in the meantime.
1280 */
1281 if (inode->i_nlink)
1282 return EVICT_SHOULD_SKIP_DELETE;
1283
1284 if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1285 test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
1286 return gfs2_upgrade_iopen_glock(inode);
1287 return EVICT_SHOULD_DELETE;
1288 }
1289
1290 /**
1291 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
1292 * @inode: The inode to evict
1293 */
1294 static int evict_unlinked_inode(struct inode *inode)
1295 {
1296 struct gfs2_inode *ip = GFS2_I(inode);
1297 int ret;
1298
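/*
 * Deallocate in stages: exhash directory blocks, extended attributes,
 * data blocks, and finally the dinode itself.
 */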
1299 if (S_ISDIR(inode->i_mode) &&
1300 (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1301 ret = gfs2_dir_exhash_dealloc(ip);
1302 if (ret)
1303 goto out;
1304 }
1305
1306 if (ip->i_eattr) {
1307 ret = gfs2_ea_dealloc(ip, true);
1308 if (ret)
1309 goto out;
1310 }
1311
1312 if (!gfs2_is_stuffed(ip)) {
1313 ret = gfs2_file_dealloc(ip);
1314 if (ret)
1315 goto out;
1316 }
1317
1318 /*
1319 * As soon as we clear the bitmap for the dinode, gfs2_create_inode()
1320 * can get called to recreate it, or even gfs2_inode_lookup() if the
1321 * inode was recreated on another node in the meantime.
1322 *
1323 * However, inserting the new inode into the inode hash table will not
1324 * succeed until the old inode is removed, and that only happens after
1325 * ->evict_inode() returns. The new inode is attached to its inode and
1326 * iopen glocks after inserting it into the inode hash table, so at
1327 * that point we can be sure that both glocks are unused.
1328 */
1329
1330 ret = gfs2_dinode_dealloc(ip);
1331 if (!ret && ip->i_gl)
1332 gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
1333
1334 out:
1335 return ret;
1336 }
1337
1338 /*
1339 * evict_linked_inode - evict an inode whose dinode has not been unlinked
1340 * @inode: The inode to evict
1341 */
1342 static int evict_linked_inode(struct inode *inode)
1343 {
1344 struct super_block *sb = inode->i_sb;
1345 struct gfs2_sbd *sdp = sb->s_fs_info;
1346 struct gfs2_inode *ip = GFS2_I(inode);
1347 struct address_space *metamapping;
1348 int ret;
1349
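/*
 * The inode is not being deallocated here, so just write everything back
 * and drop it from the page and metadata caches.
 */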
1350 gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1351 GFS2_LFC_EVICT_INODE);
1352 metamapping = gfs2_glock2aspace(ip->i_gl);
1353 if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1354 filemap_fdatawrite(metamapping);
1355 filemap_fdatawait(metamapping);
1356 }
1357 write_inode_now(inode, 1);
1358 gfs2_ail_flush(ip->i_gl, 0);
1359
1360 ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1361 if (ret)
1362 return ret;
1363
1364 /* Needs to be done before glock release & also in a transaction */
1365 truncate_inode_pages(&inode->i_data, 0);
1366 truncate_inode_pages(metamapping, 0);
1367 gfs2_trans_end(sdp);
1368 return 0;
1369 }
1370
1371 /**
1372 * gfs2_evict_inode - Remove an inode from cache
1373 * @inode: The inode to evict
1374 *
1375 * There are three cases to consider:
1376 * 1. i_nlink == 0, we are final opener (and must deallocate)
1377 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1378 * 3. i_nlink > 0
1379 *
1380 * If the fs is read only, then we have to treat all cases as per #3
1381 * since we are unable to do any deallocation. The inode will be
1382 * deallocated by the next read/write node to attempt an allocation
1383 * in the same resource group
1384 *
1385 * We have to (at the moment) hold the inode's main lock to cover
1386 * the gap between unlocking the shared lock on the iopen lock and
1387 * taking the exclusive lock. I'd rather do a shared -> exclusive
1388 * conversion on the iopen lock, but we can change that later. This
1389 * is safe, just less efficient.
1390 */
1391
1392 static void gfs2_evict_inode(struct inode *inode)
1393 {
1394 struct super_block *sb = inode->i_sb;
1395 struct gfs2_sbd *sdp = sb->s_fs_info;
1396 struct gfs2_inode *ip = GFS2_I(inode);
1397 struct gfs2_holder gh;
1398 enum evict_behavior behavior;
1399 int ret;
1400
1401 gfs2_holder_mark_uninitialized(&gh);
1402 if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr)
1403 goto out;
1404
1405 /*
1406 * In case of an incomplete mount, gfs2_evict_inode() may be called for
1407 * system files without having an active journal to write to. In that
1408 * case, skip the filesystem evict.
1409 */
1410 if (!sdp->sd_jdesc)
1411 goto out;
1412
1413 behavior = evict_should_delete(inode, &gh);
1414 if (behavior == EVICT_SHOULD_DEFER_DELETE &&
1415 !test_bit(SDF_KILL, &sdp->sd_flags)) {
1416 struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl;
1417
1418 if (io_gl) {
1419 gfs2_glock_hold(io_gl);
1420 if (!gfs2_queue_verify_delete(io_gl, true))
1421 gfs2_glock_put(io_gl);
1422 goto out;
1423 }
1424 behavior = EVICT_SHOULD_SKIP_DELETE;
1425 }
1426 if (behavior == EVICT_SHOULD_DELETE)
1427 ret = evict_unlinked_inode(inode);
1428 else
1429 ret = evict_linked_inode(inode);
1430
1431 if (gfs2_rs_active(&ip->i_res))
1432 gfs2_rs_deltree(&ip->i_res);
1433
1434 if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
1435 fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
1436 out:
1437 if (gfs2_holder_initialized(&gh))
1438 gfs2_glock_dq_uninit(&gh);
1439 truncate_inode_pages_final(&inode->i_data);
1440 if (ip->i_qadata)
1441 gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
1442 gfs2_rs_deltree(&ip->i_res);
1443 gfs2_ordered_del_inode(ip);
1444 clear_inode(inode);
1445 gfs2_dir_hash_inval(ip);
1446 if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1447 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1448
1449 glock_clear_object(gl, ip);
1450 gfs2_glock_hold(gl);
1451 ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1452 gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1453 gfs2_glock_put_eventually(gl);
1454 }
1455 if (ip->i_gl) {
1456 glock_clear_object(ip->i_gl, ip);
1457 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1458 gfs2_glock_put_eventually(ip->i_gl);
1459 rcu_assign_pointer(ip->i_gl, NULL);
1460 }
1461 }
1462
1463 static struct inode *gfs2_alloc_inode(struct super_block *sb)
1464 {
1465 struct gfs2_inode *ip;
1466
1467 ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
1468 if (!ip)
1469 return NULL;
1470 ip->i_no_addr = 0;
1471 ip->i_no_formal_ino = 0;
1472 ip->i_flags = 0;
1473 ip->i_gl = NULL;
1474 gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
1475 memset(&ip->i_res, 0, sizeof(ip->i_res));
1476 RB_CLEAR_NODE(&ip->i_res.rs_node);
1477 ip->i_diskflags = 0;
1478 ip->i_rahead = 0;
1479 return &ip->i_inode;
1480 }
1481
1482 static void gfs2_free_inode(struct inode *inode)
1483 {
1484 kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1485 }
1486
1487 void free_local_statfs_inodes(struct gfs2_sbd *sdp)
1488 {
1489 struct local_statfs_inode *lsi, *safe;
1490
1491 /* Run through the statfs inodes list to iput and free memory */
1492 list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
1493 if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
1494 sdp->sd_sc_inode = NULL; /* belongs to this node */
1495 if (lsi->si_sc_inode)
1496 iput(lsi->si_sc_inode);
1497 list_del(&lsi->si_list);
1498 kfree(lsi);
1499 }
1500 }
1501
1502 struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
1503 unsigned int index)
1504 {
1505 struct local_statfs_inode *lsi;
1506
1507 /* Return the local (per node) statfs inode in the
1508 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1509 list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
1510 if (lsi->si_jid == index)
1511 return lsi->si_sc_inode;
1512 }
1513 return NULL;
1514 }
1515
1516 const struct super_operations gfs2_super_ops = {
1517 .alloc_inode = gfs2_alloc_inode,
1518 .free_inode = gfs2_free_inode,
1519 .write_inode = gfs2_write_inode,
1520 .dirty_inode = gfs2_dirty_inode,
1521 .evict_inode = gfs2_evict_inode,
1522 .put_super = gfs2_put_super,
1523 .sync_fs = gfs2_sync_fs,
1524 .freeze_super = gfs2_freeze_super,
1525 .freeze_fs = gfs2_freeze_fs,
1526 .thaw_super = gfs2_thaw_super,
1527 .statfs = gfs2_statfs,
1528 .drop_inode = gfs2_drop_inode,
1529 .show_options = gfs2_show_options,
1530 };
1531
1532