1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
4  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/bio.h>
10 #include <linux/sched/signal.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/statfs.h>
16 #include <linux/seq_file.h>
17 #include <linux/mount.h>
18 #include <linux/kthread.h>
19 #include <linux/delay.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/crc32.h>
22 #include <linux/time.h>
23 #include <linux/wait.h>
24 #include <linux/writeback.h>
25 #include <linux/backing-dev.h>
26 #include <linux/kernel.h>
27 
28 #include "gfs2.h"
29 #include "incore.h"
30 #include "bmap.h"
31 #include "dir.h"
32 #include "glock.h"
33 #include "glops.h"
34 #include "inode.h"
35 #include "log.h"
36 #include "meta_io.h"
37 #include "quota.h"
38 #include "recovery.h"
39 #include "rgrp.h"
40 #include "super.h"
41 #include "trans.h"
42 #include "util.h"
43 #include "sys.h"
44 #include "xattr.h"
45 #include "lops.h"
46 
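/*
 * Verdicts returned by evict_should_delete() on what should happen to a
 * dinode that is being evicted.
 */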
47 enum dinode_demise {
48 	SHOULD_DELETE_DINODE,
49 	SHOULD_NOT_DELETE_DINODE,
50 	SHOULD_DEFER_EVICTION,
51 };
52 
53 /**
54  * gfs2_jindex_free - Clear all the journal index information
55  * @sdp: The GFS2 superblock
56  *
57  */
58 
59 void gfs2_jindex_free(struct gfs2_sbd *sdp)
60 {
61 	struct list_head list;
62 	struct gfs2_jdesc *jd;
63 
64 	spin_lock(&sdp->sd_jindex_spin);
65 	list_add(&list, &sdp->sd_jindex_list);
66 	list_del_init(&sdp->sd_jindex_list);
67 	sdp->sd_journals = 0;
68 	spin_unlock(&sdp->sd_jindex_spin);
69 
70 	sdp->sd_jdesc = NULL;
71 	while (!list_empty(&list)) {
72 		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
73 		gfs2_free_journal_extents(jd);
74 		list_del(&jd->jd_list);
75 		iput(jd->jd_inode);
76 		jd->jd_inode = NULL;
77 		kfree(jd);
78 	}
79 }
80 
81 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
82 {
83 	struct gfs2_jdesc *jd;
84 
85 	list_for_each_entry(jd, head, jd_list) {
86 		if (jd->jd_jid == jid)
87 			return jd;
88 	}
89 	return NULL;
90 }
91 
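/**
 * gfs2_jdesc_find - Find a journal descriptor by journal id
 * @sdp: The GFS2 superblock
 * @jid: The journal id to look for
 *
 * Returns: the journal descriptor, or NULL if no journal with that id exists
 */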
92 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
93 {
94 	struct gfs2_jdesc *jd;
95 
96 	spin_lock(&sdp->sd_jindex_spin);
97 	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
98 	spin_unlock(&sdp->sd_jindex_spin);
99 
100 	return jd;
101 }
102 
103 int gfs2_jdesc_check(struct gfs2_jdesc *jd)
104 {
105 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
106 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
107 	u64 size = i_size_read(jd->jd_inode);
108 
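	/* A journal must be between 8 MiB (8 << 20) and 1 GiB (BIT(30)) in size */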
109 	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
110 		return -EIO;
111 
112 	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
113 
114 	if (gfs2_write_alloc_required(ip, 0, size)) {
115 		gfs2_consist_inode(ip);
116 		return -EIO;
117 	}
118 
119 	return 0;
120 }
121 
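/*
 * init_threads - start the logd and quotad daemons for this filesystem
 *
 * Returns: errno
 */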
122 static int init_threads(struct gfs2_sbd *sdp)
123 {
124 	struct task_struct *p;
125 	int error = 0;
126 
127 	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
128 	if (IS_ERR(p)) {
129 		error = PTR_ERR(p);
130 		fs_err(sdp, "can't start logd thread: %d\n", error);
131 		return error;
132 	}
133 	sdp->sd_logd_process = p;
134 
135 	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
136 	if (IS_ERR(p)) {
137 		error = PTR_ERR(p);
138 		fs_err(sdp, "can't start quotad thread: %d\n", error);
139 		goto fail;
140 	}
141 	sdp->sd_quotad_process = p;
142 	return 0;
143 
144 fail:
145 	kthread_stop(sdp->sd_logd_process);
146 	sdp->sd_logd_process = NULL;
147 	return error;
148 }
149 
150 /**
151  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
152  * @sdp: the filesystem
153  *
154  * Returns: errno
155  */
156 
157 int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
158 {
159 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
160 	struct gfs2_glock *j_gl = ip->i_gl;
161 	struct gfs2_log_header_host head;
162 	int error;
163 
164 	error = init_threads(sdp);
165 	if (error)
166 		return error;
167 
168 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
169 	if (gfs2_withdrawn(sdp)) {
170 		error = -EIO;
171 		goto fail;
172 	}
173 
174 	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
175 	if (error || gfs2_withdrawn(sdp))
176 		goto fail;
177 
178 	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
179 		gfs2_consist(sdp);
180 		error = -EIO;
181 		goto fail;
182 	}
183 
184 	/*  Initialize the head of the log  */
185 	sdp->sd_log_sequence = head.lh_sequence + 1;
186 	gfs2_log_pointers_init(sdp, head.lh_blkno);
187 
188 	error = gfs2_quota_init(sdp);
189 	if (error || gfs2_withdrawn(sdp))
190 		goto fail;
191 
192 	set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
193 
194 	return 0;
195 
196 fail:
197 	if (sdp->sd_quotad_process)
198 		kthread_stop(sdp->sd_quotad_process);
199 	sdp->sd_quotad_process = NULL;
200 	if (sdp->sd_logd_process)
201 		kthread_stop(sdp->sd_logd_process);
202 	sdp->sd_logd_process = NULL;
203 	return error;
204 }
205 
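/*
 * Convert statfs change records between their on-disk (big-endian) and
 * in-core representations.
 */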
206 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
207 {
208 	const struct gfs2_statfs_change *str = buf;
209 
210 	sc->sc_total = be64_to_cpu(str->sc_total);
211 	sc->sc_free = be64_to_cpu(str->sc_free);
212 	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
213 }
214 
215 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
216 {
217 	struct gfs2_statfs_change *str = buf;
218 
219 	str->sc_total = cpu_to_be64(sc->sc_total);
220 	str->sc_free = cpu_to_be64(sc->sc_free);
221 	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
222 }
223 
224 int gfs2_statfs_init(struct gfs2_sbd *sdp)
225 {
226 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
227 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
228 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
229 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
230 	struct buffer_head *m_bh, *l_bh;
231 	struct gfs2_holder gh;
232 	int error;
233 
234 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
235 				   &gh);
236 	if (error)
237 		return error;
238 
239 	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
240 	if (error)
241 		goto out;
242 
243 	if (sdp->sd_args.ar_spectator) {
244 		spin_lock(&sdp->sd_statfs_spin);
245 		gfs2_statfs_change_in(m_sc, m_bh->b_data +
246 				      sizeof(struct gfs2_dinode));
247 		spin_unlock(&sdp->sd_statfs_spin);
248 	} else {
249 		error = gfs2_meta_inode_buffer(l_ip, &l_bh);
250 		if (error)
251 			goto out_m_bh;
252 
253 		spin_lock(&sdp->sd_statfs_spin);
254 		gfs2_statfs_change_in(m_sc, m_bh->b_data +
255 				      sizeof(struct gfs2_dinode));
256 		gfs2_statfs_change_in(l_sc, l_bh->b_data +
257 				      sizeof(struct gfs2_dinode));
258 		spin_unlock(&sdp->sd_statfs_spin);
259 
260 		brelse(l_bh);
261 	}
262 
263 out_m_bh:
264 	brelse(m_bh);
265 out:
266 	gfs2_glock_dq_uninit(&gh);
267 	return 0;
268 }
269 
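/**
 * gfs2_statfs_change - Apply a delta to the local statfs change file
 * @sdp: the filesystem
 * @total: delta to the total number of blocks
 * @free: delta to the number of free blocks
 * @dinodes: delta to the number of dinodes
 *
 * If the accumulated local free-block change crosses the statfs_percent
 * threshold, gfs2_wake_up_statfs() is called so that the local changes get
 * folded back into the master statfs file.
 */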
270 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
271 			s64 dinodes)
272 {
273 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
274 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
275 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
276 	struct buffer_head *l_bh;
277 	s64 x, y;
278 	int need_sync = 0;
279 	int error;
280 
281 	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
282 	if (error)
283 		return;
284 
285 	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
286 
287 	spin_lock(&sdp->sd_statfs_spin);
288 	l_sc->sc_total += total;
289 	l_sc->sc_free += free;
290 	l_sc->sc_dinodes += dinodes;
291 	gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
292 	if (sdp->sd_args.ar_statfs_percent) {
293 		x = 100 * l_sc->sc_free;
294 		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
295 		if (x >= y || x <= -y)
296 			need_sync = 1;
297 	}
298 	spin_unlock(&sdp->sd_statfs_spin);
299 
300 	brelse(l_bh);
301 	if (need_sync)
302 		gfs2_wake_up_statfs(sdp);
303 }
304 
305 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
306 		   struct buffer_head *l_bh)
307 {
308 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
309 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
310 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
311 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
312 
313 	gfs2_trans_add_meta(l_ip->i_gl, l_bh);
314 	gfs2_trans_add_meta(m_ip->i_gl, m_bh);
315 
316 	spin_lock(&sdp->sd_statfs_spin);
317 	m_sc->sc_total += l_sc->sc_total;
318 	m_sc->sc_free += l_sc->sc_free;
319 	m_sc->sc_dinodes += l_sc->sc_dinodes;
320 	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
321 	memset(l_bh->b_data + sizeof(struct gfs2_dinode),
322 	       0, sizeof(struct gfs2_statfs_change));
323 	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
324 	spin_unlock(&sdp->sd_statfs_spin);
325 }
326 
327 int gfs2_statfs_sync(struct super_block *sb, int type)
328 {
329 	struct gfs2_sbd *sdp = sb->s_fs_info;
330 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
331 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
332 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
333 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
334 	struct gfs2_holder gh;
335 	struct buffer_head *m_bh, *l_bh;
336 	int error;
337 
338 	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
339 				   &gh);
340 	if (error)
341 		goto out;
342 
343 	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
344 	if (error)
345 		goto out_unlock;
346 
347 	spin_lock(&sdp->sd_statfs_spin);
348 	gfs2_statfs_change_in(m_sc, m_bh->b_data +
349 			      sizeof(struct gfs2_dinode));
350 	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
351 		spin_unlock(&sdp->sd_statfs_spin);
352 		goto out_bh;
353 	}
354 	spin_unlock(&sdp->sd_statfs_spin);
355 
356 	error = gfs2_meta_inode_buffer(l_ip, &l_bh);
357 	if (error)
358 		goto out_bh;
359 
360 	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
361 	if (error)
362 		goto out_bh2;
363 
364 	update_statfs(sdp, m_bh, l_bh);
365 	sdp->sd_statfs_force_sync = 0;
366 
367 	gfs2_trans_end(sdp);
368 
369 out_bh2:
370 	brelse(l_bh);
371 out_bh:
372 	brelse(m_bh);
373 out_unlock:
374 	gfs2_glock_dq_uninit(&gh);
375 out:
376 	return error;
377 }
378 
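/*
 * Per-journal glock holder used by gfs2_lock_fs_check_clean() while
 * verifying that all journals are clean.
 */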
379 struct lfcc {
380 	struct list_head list;
381 	struct gfs2_holder gh;
382 };
383 
384 /**
385  * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
386  *                            journals are clean
387  * @sdp: the file system
390  *
391  * Returns: errno
392  */
393 
394 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
395 {
396 	struct gfs2_inode *ip;
397 	struct gfs2_jdesc *jd;
398 	struct lfcc *lfcc;
399 	LIST_HEAD(list);
400 	struct gfs2_log_header_host lh;
401 	int error;
402 
403 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
404 		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
405 		if (!lfcc) {
406 			error = -ENOMEM;
407 			goto out;
408 		}
409 		ip = GFS2_I(jd->jd_inode);
410 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
411 		if (error) {
412 			kfree(lfcc);
413 			goto out;
414 		}
415 		list_add(&lfcc->list, &list);
416 	}
417 
418 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
419 				   LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
420 	if (error)
421 		goto out;
422 
423 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
424 		error = gfs2_jdesc_check(jd);
425 		if (error)
426 			break;
427 		error = gfs2_find_jhead(jd, &lh, false);
428 		if (error)
429 			break;
430 		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
431 			error = -EBUSY;
432 			break;
433 		}
434 	}
435 
436 	if (error)
437 		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
438 
439 out:
440 	while (!list_empty(&list)) {
441 		lfcc = list_first_entry(&list, struct lfcc, list);
442 		list_del(&lfcc->list);
443 		gfs2_glock_dq_uninit(&lfcc->gh);
444 		kfree(lfcc);
445 	}
446 	return error;
447 }
448 
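/**
 * gfs2_dinode_out - Write an in-core inode into its on-disk dinode form
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 */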
449 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
450 {
451 	struct gfs2_dinode *str = buf;
452 
453 	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
454 	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
455 	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
456 	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
457 	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
458 	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
459 	str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
460 	str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
461 	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
462 	str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
463 	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
464 	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
465 	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
466 	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);
467 
468 	str->di_goal_meta = cpu_to_be64(ip->i_goal);
469 	str->di_goal_data = cpu_to_be64(ip->i_goal);
470 	str->di_generation = cpu_to_be64(ip->i_generation);
471 
472 	str->di_flags = cpu_to_be32(ip->i_diskflags);
473 	str->di_height = cpu_to_be16(ip->i_height);
474 	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
475 					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
476 					     GFS2_FORMAT_DE : 0);
477 	str->di_depth = cpu_to_be16(ip->i_depth);
478 	str->di_entries = cpu_to_be32(ip->i_entries);
479 
480 	str->di_eattr = cpu_to_be64(ip->i_eattr);
481 	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
482 	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
483 	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
484 }
485 
486 /**
487  * gfs2_write_inode - Make sure the inode is stable on the disk
488  * @inode: The inode
489  * @wbc: The writeback control structure
490  *
491  * Returns: errno
492  */
493 
494 static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
495 {
496 	struct gfs2_inode *ip = GFS2_I(inode);
497 	struct gfs2_sbd *sdp = GFS2_SB(inode);
498 	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
499 	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
500 	int ret = 0;
501 	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));
502 
503 	if (flush_all)
504 		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
505 			       GFS2_LOG_HEAD_FLUSH_NORMAL |
506 			       GFS2_LFC_WRITE_INODE);
507 	if (bdi->wb.dirty_exceeded)
508 		gfs2_ail1_flush(sdp, wbc);
509 	else
510 		filemap_fdatawrite(metamapping);
511 	if (flush_all)
512 		ret = filemap_fdatawait(metamapping);
513 	if (ret)
514 		mark_inode_dirty_sync(inode);
515 	else {
516 		spin_lock(&inode->i_lock);
517 		if (!(inode->i_flags & I_DIRTY))
518 			gfs2_ordered_del_inode(ip);
519 		spin_unlock(&inode->i_lock);
520 	}
521 	return ret;
522 }
523 
524 /**
525  * gfs2_dirty_inode - check for atime updates
526  * @inode: The inode in question
527  * @flags: The type of dirty
528  *
529  * Unfortunately it can be called under any combination of inode
530  * glock and transaction lock, so we have to check carefully.
531  *
532  * At the moment this deals only with atime - it should be possible
533  * to expand that role in future, once a review of the locking has
534  * been carried out.
535  */
536 
537 static void gfs2_dirty_inode(struct inode *inode, int flags)
538 {
539 	struct gfs2_inode *ip = GFS2_I(inode);
540 	struct gfs2_sbd *sdp = GFS2_SB(inode);
541 	struct buffer_head *bh;
542 	struct gfs2_holder gh;
543 	int need_unlock = 0;
544 	int need_endtrans = 0;
545 	int ret;
546 
547 	if (unlikely(gfs2_withdrawn(sdp)))
548 		return;
549 	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
550 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
551 		if (ret) {
552 			fs_err(sdp, "dirty_inode: glock %d\n", ret);
553 			gfs2_dump_glock(NULL, ip->i_gl, true);
554 			return;
555 		}
556 		need_unlock = 1;
557 	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
558 		return;
559 
560 	if (current->journal_info == NULL) {
561 		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
562 		if (ret) {
563 			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
564 			goto out;
565 		}
566 		need_endtrans = 1;
567 	}
568 
569 	ret = gfs2_meta_inode_buffer(ip, &bh);
570 	if (ret == 0) {
571 		gfs2_trans_add_meta(ip->i_gl, bh);
572 		gfs2_dinode_out(ip, bh->b_data);
573 		brelse(bh);
574 	}
575 
576 	if (need_endtrans)
577 		gfs2_trans_end(sdp);
578 out:
579 	if (need_unlock)
580 		gfs2_glock_dq_uninit(&gh);
581 }
582 
583 /**
584  * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
585  * @sdp: the filesystem
586  *
587  * Returns: errno
588  */
589 
590 int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
591 {
592 	int error = 0;
593 	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
594 
595 	gfs2_flush_delete_work(sdp);
596 	if (!log_write_allowed && current == sdp->sd_quotad_process)
597 		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
598 	else if (sdp->sd_quotad_process)
599 		kthread_stop(sdp->sd_quotad_process);
600 	sdp->sd_quotad_process = NULL;
601 
602 	if (!log_write_allowed && current == sdp->sd_logd_process)
603 		fs_warn(sdp, "The logd daemon is withdrawing.\n");
604 	else if (sdp->sd_logd_process)
605 		kthread_stop(sdp->sd_logd_process);
606 	sdp->sd_logd_process = NULL;
607 
608 	if (log_write_allowed) {
609 		gfs2_quota_sync(sdp->sd_vfs, 0);
610 		gfs2_statfs_sync(sdp->sd_vfs, 0);
611 
612 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
613 			       GFS2_LFC_MAKE_FS_RO);
614 		wait_event_timeout(sdp->sd_log_waitq,
615 				   gfs2_log_is_empty(sdp),
616 				   HZ * 5);
617 		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
618 	} else {
619 		wait_event_timeout(sdp->sd_log_waitq,
620 				   gfs2_log_is_empty(sdp),
621 				   HZ * 5);
622 	}
623 	gfs2_quota_cleanup(sdp);
624 
625 	if (!log_write_allowed)
626 		sdp->sd_vfs->s_flags |= SB_RDONLY;
627 
628 	return error;
629 }
630 
631 /**
632  * gfs2_put_super - Unmount the filesystem
633  * @sb: The VFS superblock
634  *
635  */
636 
637 static void gfs2_put_super(struct super_block *sb)
638 {
639 	struct gfs2_sbd *sdp = sb->s_fs_info;
640 	int error;
641 	struct gfs2_jdesc *jd;
642 
643 	/* No more recovery requests */
644 	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
645 	smp_mb();
646 
647 	/* Wait on outstanding recovery */
648 restart:
649 	spin_lock(&sdp->sd_jindex_spin);
650 	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
651 		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
652 			continue;
653 		spin_unlock(&sdp->sd_jindex_spin);
654 		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
655 			    TASK_UNINTERRUPTIBLE);
656 		goto restart;
657 	}
658 	spin_unlock(&sdp->sd_jindex_spin);
659 
660 	if (!sb_rdonly(sb)) {
661 		error = gfs2_make_fs_ro(sdp);
662 		if (error)
663 			gfs2_io_error(sdp);
664 	}
665 	WARN_ON(gfs2_withdrawing(sdp));
666 
667 	/*  At this point, we're through modifying the disk  */
668 
669 	/*  Release stuff  */
670 
671 	iput(sdp->sd_jindex);
672 	iput(sdp->sd_statfs_inode);
673 	iput(sdp->sd_rindex);
674 	iput(sdp->sd_quota_inode);
675 
676 	gfs2_glock_put(sdp->sd_rename_gl);
677 	gfs2_glock_put(sdp->sd_freeze_gl);
678 
679 	if (!sdp->sd_args.ar_spectator) {
680 		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
681 			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
682 		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
683 			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
684 		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
685 		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
686 		free_local_statfs_inodes(sdp);
687 		iput(sdp->sd_qc_inode);
688 	}
689 
690 	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
691 	gfs2_clear_rgrpd(sdp);
692 	gfs2_jindex_free(sdp);
693 	/*  Take apart glock structures and buffer lists  */
694 	gfs2_gl_hash_clear(sdp);
695 	truncate_inode_pages_final(&sdp->sd_aspace);
696 	gfs2_delete_debugfs_file(sdp);
697 	/*  Unmount the locking protocol  */
698 	gfs2_lm_unmount(sdp);
699 
700 	/*  At this point, we're through participating in the lockspace  */
701 	gfs2_sys_fs_del(sdp);
702 	free_sbd(sdp);
703 }
704 
705 /**
706  * gfs2_sync_fs - sync the filesystem
707  * @sb: the superblock
 * @wait: true to wait for completion
708  *
709  * Flushes the log to disk.
710  */
711 
712 static int gfs2_sync_fs(struct super_block *sb, int wait)
713 {
714 	struct gfs2_sbd *sdp = sb->s_fs_info;
715 
716 	gfs2_quota_sync(sb, -1);
717 	if (wait)
718 		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
719 			       GFS2_LFC_SYNC_FS);
720 	return sdp->sd_log_error;
721 }
722 
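/**
 * gfs2_freeze_func - work function that thaws the filesystem again
 * @work: the sd_freeze_work work item embedded in struct gfs2_sbd
 *
 * Re-takes the freeze glock, thaws the VFS superblock and clears the
 * SDF_FS_FROZEN flag; the filesystem is withdrawn if either step fails.
 */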
723 void gfs2_freeze_func(struct work_struct *work)
724 {
725 	int error;
726 	struct gfs2_holder freeze_gh;
727 	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
728 	struct super_block *sb = sdp->sd_vfs;
729 
730 	atomic_inc(&sb->s_active);
731 	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
732 	if (error) {
733 		gfs2_assert_withdraw(sdp, 0);
734 	} else {
735 		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
736 		error = thaw_super(sb);
737 		if (error) {
738 			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
739 				error);
740 			gfs2_assert_withdraw(sdp, 0);
741 		}
742 		gfs2_freeze_unlock(&freeze_gh);
743 	}
744 	deactivate_super(sb);
745 	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
746 	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
747 	return;
748 }
749 
750 /**
751  * gfs2_freeze - prevent further writes to the filesystem
752  * @sb: the VFS structure for the filesystem
753  *
754  */
755 
756 static int gfs2_freeze(struct super_block *sb)
757 {
758 	struct gfs2_sbd *sdp = sb->s_fs_info;
759 	int error = 0;
760 
761 	mutex_lock(&sdp->sd_freeze_mutex);
762 	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
763 		goto out;
764 
765 	for (;;) {
766 		if (gfs2_withdrawn(sdp)) {
767 			error = -EINVAL;
768 			goto out;
769 		}
770 
771 		error = gfs2_lock_fs_check_clean(sdp);
772 		if (!error)
773 			break;
774 
775 		if (error == -EBUSY)
776 			fs_err(sdp, "waiting for recovery before freeze\n");
777 		else if (error == -EIO) {
778 			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
779 			       "to recovery error.\n");
780 			goto out;
781 		} else {
782 			fs_err(sdp, "error freezing FS: %d\n", error);
783 		}
784 		fs_err(sdp, "retrying...\n");
785 		msleep(1000);
786 	}
787 	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
788 out:
789 	mutex_unlock(&sdp->sd_freeze_mutex);
790 	return error;
791 }
792 
793 /**
794  * gfs2_unfreeze - reallow writes to the filesystem
795  * @sb: the VFS structure for the filesystem
796  *
797  */
798 
799 static int gfs2_unfreeze(struct super_block *sb)
800 {
801 	struct gfs2_sbd *sdp = sb->s_fs_info;
802 
803 	mutex_lock(&sdp->sd_freeze_mutex);
804 	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
805 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
806 		mutex_unlock(&sdp->sd_freeze_mutex);
807 		return 0;
808 	}
809 
810 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
811 	mutex_unlock(&sdp->sd_freeze_mutex);
812 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
813 }
814 
815 /**
816  * statfs_slow_fill - fill in the sc for a given RG
817  * @rgd: the RG
818  * @sc: the sc structure
819  *
820  * Returns: 0 on success
821  */
822 
823 static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
824 			    struct gfs2_statfs_change_host *sc)
825 {
826 	gfs2_rgrp_verify(rgd);
827 	sc->sc_total += rgd->rd_data;
828 	sc->sc_free += rgd->rd_free;
829 	sc->sc_dinodes += rgd->rd_dinodes;
830 	return 0;
831 }
832 
833 /**
834  * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
835  * @sdp: the filesystem
836  * @sc: the sc info that will be returned
837  *
838  * Any error (other than a signal) will cause this routine to fall back
839  * to the synchronous version.
840  *
841  * FIXME: This really shouldn't busy wait like this.
842  *
843  * Returns: errno
844  */
845 
846 static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
847 {
848 	struct gfs2_rgrpd *rgd_next;
849 	struct gfs2_holder *gha, *gh;
850 	unsigned int slots = 64;
851 	unsigned int x;
852 	int done;
853 	int error = 0, err;
854 
855 	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
856 	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
857 	if (!gha)
858 		return -ENOMEM;
859 	for (x = 0; x < slots; x++)
860 		gfs2_holder_mark_uninitialized(gha + x);
861 
862 	rgd_next = gfs2_rgrpd_get_first(sdp);
863 
864 	for (;;) {
865 		done = 1;
866 
867 		for (x = 0; x < slots; x++) {
868 			gh = gha + x;
869 
870 			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
871 				err = gfs2_glock_wait(gh);
872 				if (err) {
873 					gfs2_holder_uninit(gh);
874 					error = err;
875 				} else {
876 					if (!error) {
877 						struct gfs2_rgrpd *rgd =
878 							gfs2_glock2rgrp(gh->gh_gl);
879 
880 						error = statfs_slow_fill(rgd, sc);
881 					}
882 					gfs2_glock_dq_uninit(gh);
883 				}
884 			}
885 
886 			if (gfs2_holder_initialized(gh))
887 				done = 0;
888 			else if (rgd_next && !error) {
889 				error = gfs2_glock_nq_init(rgd_next->rd_gl,
890 							   LM_ST_SHARED,
891 							   GL_ASYNC,
892 							   gh);
893 				rgd_next = gfs2_rgrpd_get_next(rgd_next);
894 				done = 0;
895 			}
896 
897 			if (signal_pending(current))
898 				error = -ERESTARTSYS;
899 		}
900 
901 		if (done)
902 			break;
903 
904 		yield();
905 	}
906 
907 	kfree(gha);
908 	return error;
909 }
910 
911 /**
912  * gfs2_statfs_i - Do a statfs
913  * @sdp: the filesystem
914  * @sc: the sc structure
915  *
916  * Returns: errno
917  */
918 
919 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
920 {
921 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
922 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
923 
924 	spin_lock(&sdp->sd_statfs_spin);
925 
926 	*sc = *m_sc;
927 	sc->sc_total += l_sc->sc_total;
928 	sc->sc_free += l_sc->sc_free;
929 	sc->sc_dinodes += l_sc->sc_dinodes;
930 
931 	spin_unlock(&sdp->sd_statfs_spin);
932 
933 	if (sc->sc_free < 0)
934 		sc->sc_free = 0;
935 	if (sc->sc_free > sc->sc_total)
936 		sc->sc_free = sc->sc_total;
937 	if (sc->sc_dinodes < 0)
938 		sc->sc_dinodes = 0;
939 
940 	return 0;
941 }
942 
943 /**
944  * gfs2_statfs - Gather and return stats about the filesystem
945  * @dentry: The dentry to stat
946  * @buf: The buffer
947  *
948  * Returns: 0 on success or error code
949  */
950 
951 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
952 {
953 	struct super_block *sb = dentry->d_sb;
954 	struct gfs2_sbd *sdp = sb->s_fs_info;
955 	struct gfs2_statfs_change_host sc;
956 	int error;
957 
958 	error = gfs2_rindex_update(sdp);
959 	if (error)
960 		return error;
961 
962 	if (gfs2_tune_get(sdp, gt_statfs_slow))
963 		error = gfs2_statfs_slow(sdp, &sc);
964 	else
965 		error = gfs2_statfs_i(sdp, &sc);
966 
967 	if (error)
968 		return error;
969 
970 	buf->f_type = GFS2_MAGIC;
971 	buf->f_bsize = sdp->sd_sb.sb_bsize;
972 	buf->f_blocks = sc.sc_total;
973 	buf->f_bfree = sc.sc_free;
974 	buf->f_bavail = sc.sc_free;
975 	buf->f_files = sc.sc_dinodes + sc.sc_free;
976 	buf->f_ffree = sc.sc_free;
977 	buf->f_namelen = GFS2_FNAMESIZE;
978 
979 	return 0;
980 }
981 
982 /**
983  * gfs2_drop_inode - Drop an inode (test for remote unlink)
984  * @inode: The inode to drop
985  *
986  * If we've received a callback on an iopen lock then it's because a
987  * remote node tried to deallocate the inode but failed due to this node
988  * still having the inode open. Here we mark the link count zero
989  * since we know that it must have reached zero if the GLF_DEMOTE flag
990  * is set on the iopen glock. If we didn't do a disk read since the
991  * remote node removed the final link then we might otherwise miss
992  * this event. This check ensures that this node will deallocate the
993  * inode's blocks, or alternatively pass the baton on to another
994  * node for later deallocation.
995  */
996 
997 static int gfs2_drop_inode(struct inode *inode)
998 {
999 	struct gfs2_inode *ip = GFS2_I(inode);
1000 
1001 	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
1002 	    inode->i_nlink &&
1003 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
1004 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1005 		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
1006 			clear_nlink(inode);
1007 	}
1008 
1009 	/*
1010 	 * When under memory pressure when an inode's link count has dropped to
1011 	 * zero, defer deleting the inode to the delete workqueue.  This avoids
1012 	 * calling into DLM under memory pressure, which can deadlock.
1013 	 */
1014 	if (!inode->i_nlink &&
1015 	    unlikely(current->flags & PF_MEMALLOC) &&
1016 	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
1017 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1018 
1019 		gfs2_glock_hold(gl);
1020 		if (!gfs2_queue_delete_work(gl, 0))
1021 			gfs2_glock_queue_put(gl);
1022 		return false;
1023 	}
1024 
1025 	return generic_drop_inode(inode);
1026 }
1027 
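/* Returns 1 if d2 is d1 itself or an ancestor of d1, 0 otherwise. */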
1028 static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
1029 {
1030 	do {
1031 		if (d1 == d2)
1032 			return 1;
1033 		d1 = d1->d_parent;
1034 	} while (!IS_ROOT(d1));
1035 	return 0;
1036 }
1037 
1038 /**
1039  * gfs2_show_options - Show mount options for /proc/mounts
1040  * @s: seq_file structure
1041  * @root: root of this (sub)tree
1042  *
1043  * Returns: 0 on success or error code
1044  */
1045 
1046 static int gfs2_show_options(struct seq_file *s, struct dentry *root)
1047 {
1048 	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
1049 	struct gfs2_args *args = &sdp->sd_args;
1050 	int val;
1051 
1052 	if (is_ancestor(root, sdp->sd_master_dir))
1053 		seq_puts(s, ",meta");
1054 	if (args->ar_lockproto[0])
1055 		seq_show_option(s, "lockproto", args->ar_lockproto);
1056 	if (args->ar_locktable[0])
1057 		seq_show_option(s, "locktable", args->ar_locktable);
1058 	if (args->ar_hostdata[0])
1059 		seq_show_option(s, "hostdata", args->ar_hostdata);
1060 	if (args->ar_spectator)
1061 		seq_puts(s, ",spectator");
1062 	if (args->ar_localflocks)
1063 		seq_puts(s, ",localflocks");
1064 	if (args->ar_debug)
1065 		seq_puts(s, ",debug");
1066 	if (args->ar_posix_acl)
1067 		seq_puts(s, ",acl");
1068 	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
1069 		char *state;
1070 		switch (args->ar_quota) {
1071 		case GFS2_QUOTA_OFF:
1072 			state = "off";
1073 			break;
1074 		case GFS2_QUOTA_ACCOUNT:
1075 			state = "account";
1076 			break;
1077 		case GFS2_QUOTA_ON:
1078 			state = "on";
1079 			break;
1080 		default:
1081 			state = "unknown";
1082 			break;
1083 		}
1084 		seq_printf(s, ",quota=%s", state);
1085 	}
1086 	if (args->ar_suiddir)
1087 		seq_puts(s, ",suiddir");
1088 	if (args->ar_data != GFS2_DATA_DEFAULT) {
1089 		char *state;
1090 		switch (args->ar_data) {
1091 		case GFS2_DATA_WRITEBACK:
1092 			state = "writeback";
1093 			break;
1094 		case GFS2_DATA_ORDERED:
1095 			state = "ordered";
1096 			break;
1097 		default:
1098 			state = "unknown";
1099 			break;
1100 		}
1101 		seq_printf(s, ",data=%s", state);
1102 	}
1103 	if (args->ar_discard)
1104 		seq_puts(s, ",discard");
1105 	val = sdp->sd_tune.gt_logd_secs;
1106 	if (val != 30)
1107 		seq_printf(s, ",commit=%d", val);
1108 	val = sdp->sd_tune.gt_statfs_quantum;
1109 	if (val != 30)
1110 		seq_printf(s, ",statfs_quantum=%d", val);
1111 	else if (sdp->sd_tune.gt_statfs_slow)
1112 		seq_puts(s, ",statfs_quantum=0");
1113 	val = sdp->sd_tune.gt_quota_quantum;
1114 	if (val != 60)
1115 		seq_printf(s, ",quota_quantum=%d", val);
1116 	if (args->ar_statfs_percent)
1117 		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
1118 	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
1119 		const char *state;
1120 
1121 		switch (args->ar_errors) {
1122 		case GFS2_ERRORS_WITHDRAW:
1123 			state = "withdraw";
1124 			break;
1125 		case GFS2_ERRORS_PANIC:
1126 			state = "panic";
1127 			break;
1128 		default:
1129 			state = "unknown";
1130 			break;
1131 		}
1132 		seq_printf(s, ",errors=%s", state);
1133 	}
1134 	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
1135 		seq_puts(s, ",nobarrier");
1136 	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
1137 		seq_puts(s, ",demote_interface_used");
1138 	if (args->ar_rgrplvb)
1139 		seq_puts(s, ",rgrplvb");
1140 	if (args->ar_loccookie)
1141 		seq_puts(s, ",loccookie");
1142 	return 0;
1143 }
1144 
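/**
 * gfs2_final_release_pages - Truncate any remaining data and metadata pages
 * @ip: the inode being released
 */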
1145 static void gfs2_final_release_pages(struct gfs2_inode *ip)
1146 {
1147 	struct inode *inode = &ip->i_inode;
1148 	struct gfs2_glock *gl = ip->i_gl;
1149 
1150 	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
1151 	truncate_inode_pages(&inode->i_data, 0);
1152 
1153 	if (atomic_read(&gl->gl_revokes) == 0) {
1154 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
1155 		clear_bit(GLF_DIRTY, &gl->gl_flags);
1156 	}
1157 }
1158 
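/**
 * gfs2_dinode_dealloc - Free the block holding the dinode itself
 * @ip: the inode to deallocate
 *
 * Returns: errno
 */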
1159 static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
1160 {
1161 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1162 	struct gfs2_rgrpd *rgd;
1163 	struct gfs2_holder gh;
1164 	int error;
1165 
1166 	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
1167 		gfs2_consist_inode(ip);
1168 		return -EIO;
1169 	}
1170 
1171 	error = gfs2_rindex_update(sdp);
1172 	if (error)
1173 		return error;
1174 
1175 	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1176 	if (error)
1177 		return error;
1178 
1179 	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
1180 	if (!rgd) {
1181 		gfs2_consist_inode(ip);
1182 		error = -EIO;
1183 		goto out_qs;
1184 	}
1185 
1186 	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1187 				   LM_FLAG_NODE_SCOPE, &gh);
1188 	if (error)
1189 		goto out_qs;
1190 
1191 	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
1192 				 sdp->sd_jdesc->jd_blocks);
1193 	if (error)
1194 		goto out_rg_gunlock;
1195 
1196 	gfs2_free_di(rgd, ip);
1197 
1198 	gfs2_final_release_pages(ip);
1199 
1200 	gfs2_trans_end(sdp);
1201 
1202 out_rg_gunlock:
1203 	gfs2_glock_dq_uninit(&gh);
1204 out_qs:
1205 	gfs2_quota_unhold(ip);
1206 	return error;
1207 }
1208 
1209 /**
1210  * gfs2_glock_put_eventually
1211  * @gl:	The glock to put
1212  *
1213  * When under memory pressure, trigger a deferred glock put to make sure we
1214  * won't call into DLM and deadlock.  Otherwise, put the glock directly.
1215  */
1216 
1217 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
1218 {
1219 	if (current->flags & PF_MEMALLOC)
1220 		gfs2_glock_queue_put(gl);
1221 	else
1222 		gfs2_glock_put(gl);
1223 }
1224 
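/**
 * gfs2_upgrade_iopen_glock - Try to take the iopen glock exclusively
 * @inode: the inode whose iopen glock is held
 *
 * Returns: true if the exclusive iopen glock was acquired, false otherwise
 */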
1225 static bool gfs2_upgrade_iopen_glock(struct inode *inode)
1226 {
1227 	struct gfs2_inode *ip = GFS2_I(inode);
1228 	struct gfs2_sbd *sdp = GFS2_SB(inode);
1229 	struct gfs2_holder *gh = &ip->i_iopen_gh;
1230 	long timeout = 5 * HZ;
1231 	int error;
1232 
1233 	gh->gh_flags |= GL_NOCACHE;
1234 	gfs2_glock_dq_wait(gh);
1235 
1236 	/*
1237 	 * If there are no other lock holders, we'll get the lock immediately.
1238 	 * Otherwise, the other nodes holding the lock will be notified about
1239 	 * our locking request.  If they don't have the inode open, they'll
1240 	 * evict the cached inode and release the lock.  Otherwise, if they
1241 	 * poke the inode glock, we'll take this as an indication that they
1242 	 * still need the iopen glock and that they'll take care of deleting
1243 	 * the inode when they're done.  As a last resort, if another node
1244 	 * keeps holding the iopen glock without showing any activity on the
1245 	 * inode glock, we'll eventually time out.
1246 	 *
1247 	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
1248 	 * locking request as an optimization to notify lock holders as soon as
1249 	 * possible.  Without that flag, they'd be notified implicitly by the
1250 	 * second locking request.
1251 	 */
1252 
1253 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
1254 	error = gfs2_glock_nq(gh);
1255 	if (error != GLR_TRYFAILED)
1256 		return !error;
1257 
1258 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
1259 	error = gfs2_glock_nq(gh);
1260 	if (error)
1261 		return false;
1262 
1263 	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
1264 		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
1265 		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
1266 		timeout);
1267 	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1268 		gfs2_glock_dq(gh);
1269 		return false;
1270 	}
1271 	return true;
1272 }
1273 
1274 /**
1275  * evict_should_delete - determine whether the inode is eligible for deletion
1276  * @inode: The inode to evict
 * @gh: The glock holder to use for the inode glock
1277  *
1278  * This function determines whether the evicted inode is eligible to be deleted
1279  * and locks the inode glock.
1280  *
1281  * Returns: the fate of the dinode
1282  */
1283 static enum dinode_demise evict_should_delete(struct inode *inode,
1284 					      struct gfs2_holder *gh)
1285 {
1286 	struct gfs2_inode *ip = GFS2_I(inode);
1287 	struct super_block *sb = inode->i_sb;
1288 	struct gfs2_sbd *sdp = sb->s_fs_info;
1289 	int ret;
1290 
1291 	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
1292 		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
1293 		goto should_delete;
1294 	}
1295 
1296 	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
1297 		return SHOULD_DEFER_EVICTION;
1298 
1299 	/* Deletes should never happen under memory pressure anymore.  */
1300 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
1301 		return SHOULD_DEFER_EVICTION;
1302 
1303 	/* Must not read inode block until block type has been verified */
1304 	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
1305 	if (unlikely(ret)) {
1306 		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
1307 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1308 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
1309 		return SHOULD_DEFER_EVICTION;
1310 	}
1311 
1312 	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
1313 		return SHOULD_NOT_DELETE_DINODE;
1314 	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
1315 	if (ret)
1316 		return SHOULD_NOT_DELETE_DINODE;
1317 
1318 	if (test_bit(GIF_INVALID, &ip->i_flags)) {
1319 		ret = gfs2_inode_refresh(ip);
1320 		if (ret)
1321 			return SHOULD_NOT_DELETE_DINODE;
1322 	}
1323 
1324 	/*
1325 	 * The inode may have been recreated in the meantime.
1326 	 */
1327 	if (inode->i_nlink)
1328 		return SHOULD_NOT_DELETE_DINODE;
1329 
1330 should_delete:
1331 	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
1332 	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1333 		if (!gfs2_upgrade_iopen_glock(inode)) {
1334 			gfs2_holder_uninit(&ip->i_iopen_gh);
1335 			return SHOULD_NOT_DELETE_DINODE;
1336 		}
1337 	}
1338 	return SHOULD_DELETE_DINODE;
1339 }
1340 
1341 /**
1342  * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
1343  * @inode: The inode to evict
1344  */
1345 static int evict_unlinked_inode(struct inode *inode)
1346 {
1347 	struct gfs2_inode *ip = GFS2_I(inode);
1348 	int ret;
1349 
1350 	if (S_ISDIR(inode->i_mode) &&
1351 	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
1352 		ret = gfs2_dir_exhash_dealloc(ip);
1353 		if (ret)
1354 			goto out;
1355 	}
1356 
1357 	if (ip->i_eattr) {
1358 		ret = gfs2_ea_dealloc(ip);
1359 		if (ret)
1360 			goto out;
1361 	}
1362 
1363 	if (!gfs2_is_stuffed(ip)) {
1364 		ret = gfs2_file_dealloc(ip);
1365 		if (ret)
1366 			goto out;
1367 	}
1368 
1369 	/* We're about to clear the bitmap for the dinode, but as soon as we
1370 	   do, gfs2_create_inode can create another inode at the same block
1371 	   location and try to set gl_object again. We clear gl_object here so
1372 	   that subsequent inode creates don't see an old gl_object. */
1373 	glock_clear_object(ip->i_gl, ip);
1374 	ret = gfs2_dinode_dealloc(ip);
1375 	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
1376 out:
1377 	return ret;
1378 }
1379 
1380 /*
1381  * evict_linked_inode - evict an inode whose dinode has not been unlinked
1382  * @inode: The inode to evict
1383  */
1384 static int evict_linked_inode(struct inode *inode)
1385 {
1386 	struct super_block *sb = inode->i_sb;
1387 	struct gfs2_sbd *sdp = sb->s_fs_info;
1388 	struct gfs2_inode *ip = GFS2_I(inode);
1389 	struct address_space *metamapping;
1390 	int ret;
1391 
1392 	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
1393 		       GFS2_LFC_EVICT_INODE);
1394 	metamapping = gfs2_glock2aspace(ip->i_gl);
1395 	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
1396 		filemap_fdatawrite(metamapping);
1397 		filemap_fdatawait(metamapping);
1398 	}
1399 	write_inode_now(inode, 1);
1400 	gfs2_ail_flush(ip->i_gl, 0);
1401 
1402 	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
1403 	if (ret)
1404 		return ret;
1405 
1406 	/* Needs to be done before glock release & also in a transaction */
1407 	truncate_inode_pages(&inode->i_data, 0);
1408 	truncate_inode_pages(metamapping, 0);
1409 	gfs2_trans_end(sdp);
1410 	return 0;
1411 }
1412 
1413 /**
1414  * gfs2_evict_inode - Remove an inode from cache
1415  * @inode: The inode to evict
1416  *
1417  * There are three cases to consider:
1418  * 1. i_nlink == 0, we are final opener (and must deallocate)
1419  * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
1420  * 3. i_nlink > 0
1421  *
1422  * If the fs is read only, then we have to treat all cases as per #3
1423  * since we are unable to do any deallocation. The inode will be
1424  * deallocated by the next read/write node to attempt an allocation
1425  * in the same resource group
1426  *
1427  * We have to (at the moment) hold the inode's main lock to cover
1428  * the gap between unlocking the shared lock on the iopen lock and
1429  * taking the exclusive lock. I'd rather do a shared -> exclusive
1430  * conversion on the iopen lock, but we can change that later. This
1431  * is safe, just less efficient.
1432  */
1433 
1434 static void gfs2_evict_inode(struct inode *inode)
1435 {
1436 	struct super_block *sb = inode->i_sb;
1437 	struct gfs2_sbd *sdp = sb->s_fs_info;
1438 	struct gfs2_inode *ip = GFS2_I(inode);
1439 	struct gfs2_holder gh;
1440 	int ret;
1441 
1442 	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
1443 		clear_inode(inode);
1444 		return;
1445 	}
1446 
1447 	if (inode->i_nlink || sb_rdonly(sb))
1448 		goto out;
1449 
1450 	gfs2_holder_mark_uninitialized(&gh);
1451 	ret = evict_should_delete(inode, &gh);
1452 	if (ret == SHOULD_DEFER_EVICTION)
1453 		goto out;
1454 	if (ret == SHOULD_DELETE_DINODE)
1455 		ret = evict_unlinked_inode(inode);
1456 	else
1457 		ret = evict_linked_inode(inode);
1458 
1459 	if (gfs2_rs_active(&ip->i_res))
1460 		gfs2_rs_deltree(&ip->i_res);
1461 
1462 	if (gfs2_holder_initialized(&gh)) {
1463 		glock_clear_object(ip->i_gl, ip);
1464 		gfs2_glock_dq_uninit(&gh);
1465 	}
1466 	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
1467 		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
1468 out:
1469 	truncate_inode_pages_final(&inode->i_data);
1470 	if (ip->i_qadata)
1471 		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
1472 	gfs2_rs_delete(ip, NULL);
1473 	gfs2_ordered_del_inode(ip);
1474 	clear_inode(inode);
1475 	gfs2_dir_hash_inval(ip);
1476 	if (ip->i_gl) {
1477 		glock_clear_object(ip->i_gl, ip);
1478 		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
1479 		gfs2_glock_add_to_lru(ip->i_gl);
1480 		gfs2_glock_put_eventually(ip->i_gl);
1481 		ip->i_gl = NULL;
1482 	}
1483 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
1484 		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
1485 
1486 		glock_clear_object(gl, ip);
1487 		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
1488 			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
1489 			gfs2_glock_dq(&ip->i_iopen_gh);
1490 		}
1491 		gfs2_glock_hold(gl);
1492 		gfs2_holder_uninit(&ip->i_iopen_gh);
1493 		gfs2_glock_put_eventually(gl);
1494 	}
1495 }
1496 
1497 static struct inode *gfs2_alloc_inode(struct super_block *sb)
1498 {
1499 	struct gfs2_inode *ip;
1500 
1501 	ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
1502 	if (!ip)
1503 		return NULL;
1504 	ip->i_flags = 0;
1505 	ip->i_gl = NULL;
1506 	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
1507 	memset(&ip->i_res, 0, sizeof(ip->i_res));
1508 	RB_CLEAR_NODE(&ip->i_res.rs_node);
1509 	ip->i_rahead = 0;
1510 	return &ip->i_inode;
1511 }
1512 
1513 static void gfs2_free_inode(struct inode *inode)
1514 {
1515 	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
1516 }
1517 
1518 void free_local_statfs_inodes(struct gfs2_sbd *sdp)
1519 {
1520 	struct local_statfs_inode *lsi, *safe;
1521 
1522 	/* Run through the statfs inodes list to iput and free memory */
1523 	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
1524 		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
1525 			sdp->sd_sc_inode = NULL; /* belongs to this node */
1526 		if (lsi->si_sc_inode)
1527 			iput(lsi->si_sc_inode);
1528 		list_del(&lsi->si_list);
1529 		kfree(lsi);
1530 	}
1531 }
1532 
1533 struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
1534 					     unsigned int index)
1535 {
1536 	struct local_statfs_inode *lsi;
1537 
1538 	/* Return the local (per node) statfs inode in the
1539 	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
1540 	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
1541 		if (lsi->si_jid == index)
1542 			return lsi->si_sc_inode;
1543 	}
1544 	return NULL;
1545 }
1546 
1547 const struct super_operations gfs2_super_ops = {
1548 	.alloc_inode		= gfs2_alloc_inode,
1549 	.free_inode		= gfs2_free_inode,
1550 	.write_inode		= gfs2_write_inode,
1551 	.dirty_inode		= gfs2_dirty_inode,
1552 	.evict_inode		= gfs2_evict_inode,
1553 	.put_super		= gfs2_put_super,
1554 	.sync_fs		= gfs2_sync_fs,
1555 	.freeze_super		= gfs2_freeze,
1556 	.thaw_super		= gfs2_unfreeze,
1557 	.statfs			= gfs2_statfs,
1558 	.drop_inode		= gfs2_drop_inode,
1559 	.show_options		= gfs2_show_options,
1560 };
1561 
1562