xref: /linux/fs/gfs2/glops.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	INIT_LIST_HEAD(&tr.tr_list_buf);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	BUG_ON(current->journal_info);
	current->journal_info = &tr;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(bd);
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
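
/*
 * Illustrative userspace sketch (not part of the kernel build; the
 * helpers below are hypothetical stand-ins for <linux/list.h>): the
 * while loop above is the classic "drain an intrusive list" shape,
 * which the following compiles and demonstrates on its own:
 *
 *	#include <stdio.h>
 *	#include <stddef.h>
 *
 *	struct list_head { struct list_head *next, *prev; };
 *
 *	#define LIST_HEAD_INIT(name) { &(name), &(name) }
 *	#define container_of(ptr, type, member) \
 *		((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *	static void list_add(struct list_head *e, struct list_head *head)
 *	{
 *		e->next = head->next;
 *		e->prev = head;
 *		head->next->prev = e;
 *		head->next = e;
 *	}
 *
 *	static void list_del(struct list_head *e)
 *	{
 *		e->prev->next = e->next;
 *		e->next->prev = e->prev;
 *		e->next = e->prev = e;	// like list_del_init()
 *	}
 *
 *	struct item { int id; struct list_head node; };
 *
 *	static void drain(struct list_head *head)
 *	{
 *		while (head->next != head) {	// !list_empty(head)
 *			struct item *it = container_of(head->next,
 *						       struct item, node);
 *			list_del(&it->node);	// cf. gfs2_remove_from_ail()
 *			printf("revoked item %d\n", it->id);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		struct list_head head = LIST_HEAD_INIT(head);
 *		struct item a = { .id = 1 }, b = { .id = 2 };
 *
 *		list_add(&a.node, &head);
 *		list_add(&b.node, &head);
 *		drain(&head);	// prints item 2, then item 1 (LIFO)
 *		return 0;
 *	}
 *
 * Re-reading head->next on each iteration is what makes the drain
 * safe: every entry is unlinked before the next one is looked at.
 */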

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
}
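
/*
 * Illustrative userspace sketch (not part of the kernel build; the
 * helpers below are hypothetical stand-ins): the body above follows
 * the usual writeback shape -- start the I/O, wait for it, then latch
 * any error on the mapping so a later caller (such as fsync) still
 * sees it:
 *
 *	#include <stdio.h>
 *
 *	struct mapping { int pending; int latched_error; };
 *
 *	static void start_writeback(struct mapping *m)
 *	{
 *		m->pending = 1;		// cf. filemap_fdatawrite(): async
 *	}
 *
 *	static int wait_writeback(struct mapping *m)
 *	{
 *		m->pending = 0;		// cf. filemap_fdatawait()
 *		return 0;		// or the first I/O errno observed
 *	}
 *
 *	static void set_error(struct mapping *m, int err)
 *	{
 *		if (err && !m->latched_error)
 *			m->latched_error = err;	// keep the first error
 *	}
 *
 *	int main(void)
 *	{
 *		struct mapping m = { 0, 0 };
 *
 *		start_writeback(&m);
 *		set_error(&m, wait_writeback(&m));
 *		printf("latched error: %d\n", m.latched_error);
 *		return 0;
 *	}
 */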

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	BUG_ON(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	BUG_ON(gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
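
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * trailing clear_bit() above exists because writing back the data
 * mapping can mark the glock dirty again after the initial
 * test_and_clear_bit().  A hypothetical analogue of the "claim flag,
 * do work that may re-set it, clear it again" shape, with C11 release
 * ordering loosely standing in for smp_mb__before_clear_bit():
 *
 *	#include <stdatomic.h>
 *	#include <stdio.h>
 *
 *	#define DIRTY 1u
 *
 *	static _Atomic unsigned int flags = DIRTY;
 *
 *	static void writeback(void)
 *	{
 *		// writeback can redirty pages, which re-sets the flag
 *		atomic_fetch_or(&flags, DIRTY);
 *	}
 *
 *	int main(void)
 *	{
 *		// step 1: claim the flag (cf. test_and_clear_bit())
 *		if (!(atomic_fetch_and(&flags, ~DIRTY) & DIRTY))
 *			return 0;	// nothing to sync
 *		// step 2: the actual sync work
 *		writeback();
 *		// step 3: clear the re-set flag; release ordering makes
 *		// the finished sync visible before the clear is
 *		atomic_fetch_and_explicit(&flags, ~DIRTY,
 *					  memory_order_release);
 *		printf("flags now 0x%x\n", (unsigned)atomic_load(&flags));
 *		return 0;
 *	}
 */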

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex))
		gl->gl_sbd->sd_rindex_uptodate = 0;
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
	return 1;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			/* list_add(new, head): the inode joins the sbd's
			   truncation list, not the other way around */
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
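
/*
 * Illustrative userspace sketch (not part of the kernel build): the
 * list_add() above originally had its arguments reversed, which would
 * splice the superblock's list head into the inode's empty private
 * list and silently lose the entry.  The argument order, demonstrated
 * with a hypothetical minimal list_head:
 *
 *	#include <stdio.h>
 *
 *	struct list_head { struct list_head *next, *prev; };
 *	#define LIST_HEAD_INIT(n) { &(n), &(n) }
 *
 *	// list_add(new, head): insert new right after head
 *	static void list_add(struct list_head *new, struct list_head *head)
 *	{
 *		new->next = head->next;
 *		new->prev = head;
 *		head->next->prev = new;
 *		head->next = new;
 *	}
 *
 *	int main(void)
 *	{
 *		struct list_head head = LIST_HEAD_INIT(head);
 *		struct list_head node = LIST_HEAD_INIT(node);
 *
 *		list_add(&node, &head);	// right: node joins head's list
 *		printf("on list: %s\n",
 *		       head.next == &node ? "yes" : "no");	// "yes"
 *		return 0;
 *	}
 *
 * With the arguments swapped, head.next would still point at head and
 * the node would never be seen by anyone walking the list.
 */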

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: the glock protecting the inode
 *
 * Returns: 0
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock an RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
	const struct address_space *mapping = (const struct address_space *)(gl + 1);
	return !mapping->nrpages;
}
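
/*
 * Illustrative userspace sketch (not part of the kernel build; the
 * types below are hypothetical): "(gl + 1)" works because glocks
 * created with GLOF_ASPACE have their struct address_space allocated
 * immediately after the glock itself, so stepping one struct past the
 * glock lands on it -- the same layout gfs2_glock2aspace() relies on:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct lock  { long state; };
 *	struct space { long nrpages; };
 *
 *	int main(void)
 *	{
 *		// one allocation holds both objects, back to back
 *		struct lock *lk = malloc(sizeof(struct lock) +
 *					 sizeof(struct space));
 *		struct space *sp;
 *
 *		if (!lk)
 *			return 1;
 *		sp = (struct space *)(lk + 1);	// the trailing object
 *		lk->state = 0;
 *		sp->nrpages = 0;
 *		printf("demote ok: %d\n", !sp->nrpages);	// prints 1
 *		free(lk);
 *		return 0;
 *	}
 *
 * The caveat is alignment: the trailing struct's alignment must be
 * satisfied at sizeof(struct lock), which holds here because both
 * structs share the same (long) alignment.
 */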

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the glock holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		flush_workqueue(gfs2_delete_workqueue);
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize the head of the log  */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gfs2_glock_hold(gl);
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gfs2_glock_put_nolock(gl);
	}
}
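
/*
 * Illustrative userspace sketch (not part of the kernel build; the
 * helpers below are hypothetical): the hold/put pair above is the
 * standard "pin the object across deferred work" pattern -- take a
 * reference before queueing, and drop it immediately if the work was
 * already pending (queue_work() returns 0 in that case), since only
 * the one queued instance will run and release the reference it owns:
 *
 *	#include <stdio.h>
 *
 *	struct obj { int refs; int queued; };
 *
 *	static void hold(struct obj *o) { o->refs++; }
 *	static void put(struct obj *o)
 *	{
 *		if (--o->refs == 0)
 *			printf("freed\n");
 *	}
 *
 *	// returns 1 if newly queued, 0 if already pending
 *	static int queue_once(struct obj *o)
 *	{
 *		if (o->queued)
 *			return 0;
 *		o->queued = 1;
 *		return 1;
 *	}
 *
 *	static void schedule_delete(struct obj *o)
 *	{
 *		hold(o);		// reference owned by the worker
 *		if (queue_once(o) == 0)
 *			put(o);		// already queued: undo the hold
 *	}
 *
 *	int main(void)
 *	{
 *		struct obj o = { 1, 0 };
 *
 *		schedule_delete(&o);	// queues, refs == 2
 *		schedule_delete(&o);	// already queued, refs stays 2
 *		printf("refs=%d queued=%d\n", o.refs, o.queued);
 *		return 0;
 *	}
 */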

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 5,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 5,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
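
/*
 * Illustrative userspace sketch (not part of the kernel build; the
 * names below are hypothetical): gfs2_glops_list is a designated-
 * initializer dispatch table -- the lock type constant is the array
 * index, so looking up the operations for a glock is a single indexed
 * load.  A miniature of the pattern:
 *
 *	#include <stdio.h>
 *
 *	enum { T_META, T_INODE, T_RGRP, T_MAX };
 *
 *	struct ops { const char *name; };
 *
 *	static const struct ops meta_ops  = { "meta"  };
 *	static const struct ops inode_ops = { "inode" };
 *	static const struct ops rgrp_ops  = { "rgrp"  };
 *
 *	static const struct ops *ops_list[T_MAX] = {
 *		[T_META]  = &meta_ops,
 *		[T_INODE] = &inode_ops,
 *		[T_RGRP]  = &rgrp_ops,
 *	};
 *
 *	int main(void)
 *	{
 *		printf("%s\n", ops_list[T_INODE]->name);	// "inode"
 *		return 0;
 *	}
 *
 * Indices with no initializer stay NULL, so callers must either check
 * or guarantee the type is populated -- as the glock code does.
 */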