// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/log2.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

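/**
 * gfs2_ail_error - complain about a buffer left on the AIL
 * @gl: the glock the buffer belongs to
 * @bh: the offending buffer
 *
 * Log the buffer, page and glock state and arrange for a delayed withdraw.
 */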
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

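/**
 * gfs2_ail_empty_gl - write revokes for all of a glock's AIL buffers
 * @gl: the glock
 *
 * Write revokes for all buffers on the glock's AIL list and flush the log.
 * If the AIL list is already empty, wait for any queued or in-flight
 * revokes to reach the journal instead.
 *
 * Returns: errno
 */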
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the ail, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * io outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

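/**
 * gfs2_ail_flush - write revokes for the glock's AIL buffers
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 */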
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 *
 * Returns: errno
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* invalidation flags; DIO_METADATA should always be set
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

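/**
 * gfs2_rgrp_go_dump - print information about a resource group
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */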
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

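/**
 * gfs2_glock2inode - safely look up the inode protected by an inode glock
 * @gl: the glock
 *
 * Grab gl_object under the lockref spin lock and set GIF_GLOP_PENDING so
 * that the glock operation in progress can be waited for.
 */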
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

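/**
 * gfs2_glock2rgrp - safely look up the resource group protected by a glock
 * @gl: the glock
 */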
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

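/**
 * gfs2_clear_glop_pending - clear GIF_GLOP_PENDING and wake up any waiters
 * @ip: the inode (may be NULL)
 */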
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* invalidation flags
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

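/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The on-disk dinode data
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */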
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
	    depth < ilog2(sdp->sd_hash_ptrs)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_glock *io_gl;
	int error;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	error = gfs2_inode_refresh(ip);
	if (error)
		return error;
	io_gl = ip->i_iopen_gh.gh_gl;
	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
	return 0;
}

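/**
 * inode_go_held - carry out delayed work once an inode glock is held
 * @gh: the glock holder
 *
 * Wait for outstanding direct I/O and resume an interrupted truncate
 * when the glock has just been acquired exclusively.
 *
 * Returns: errno
 */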
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode),
		       inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()). But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 * Returns: errno
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		gfs2_log_pointers_init(sdp, &head);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * The gl_lockref.lock spinlock is held while calling this.
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
	.go_unlocked = inode_go_unlocked,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

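/* Table of glock operations, indexed by glock type (LM_TYPE_*). */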
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};