1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 5 */ 6 7 #include <linux/spinlock.h> 8 #include <linux/completion.h> 9 #include <linux/buffer_head.h> 10 #include <linux/gfs2_ondisk.h> 11 #include <linux/bio.h> 12 #include <linux/posix_acl.h> 13 #include <linux/security.h> 14 15 #include "gfs2.h" 16 #include "incore.h" 17 #include "bmap.h" 18 #include "glock.h" 19 #include "glops.h" 20 #include "inode.h" 21 #include "log.h" 22 #include "meta_io.h" 23 #include "recovery.h" 24 #include "rgrp.h" 25 #include "util.h" 26 #include "trans.h" 27 #include "dir.h" 28 #include "lops.h" 29 30 struct workqueue_struct *gfs2_freeze_wq; 31 32 extern struct workqueue_struct *gfs2_control_wq; 33 34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) 35 { 36 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 37 38 fs_err(sdp, 39 "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page " 40 "state 0x%lx\n", 41 bh, (unsigned long long)bh->b_blocknr, bh->b_state, 42 bh->b_folio->mapping, bh->b_folio->flags); 43 fs_err(sdp, "AIL glock %u:%llu mapping %p\n", 44 gl->gl_name.ln_type, gl->gl_name.ln_number, 45 gfs2_glock2aspace(gl)); 46 gfs2_lm(sdp, "AIL error\n"); 47 gfs2_withdraw_delayed(sdp); 48 } 49 50 /** 51 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL 52 * @gl: the glock 53 * @fsync: set when called from fsync (not all buffers will be clean) 54 * @nr_revokes: Number of buffers to revoke 55 * 56 * None of the buffers should be dirty, locked, or pinned. 57 */ 58 59 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, 60 unsigned int nr_revokes) 61 { 62 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 63 struct list_head *head = &gl->gl_ail_list; 64 struct gfs2_bufdata *bd, *tmp; 65 struct buffer_head *bh; 66 const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); 67 68 gfs2_log_lock(sdp); 69 spin_lock(&sdp->sd_ail_lock); 70 list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { 71 if (nr_revokes == 0) 72 break; 73 bh = bd->bd_bh; 74 if (bh->b_state & b_state) { 75 if (fsync) 76 continue; 77 gfs2_ail_error(gl, bh); 78 } 79 gfs2_trans_add_revoke(sdp, bd); 80 nr_revokes--; 81 } 82 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); 83 spin_unlock(&sdp->sd_ail_lock); 84 gfs2_log_unlock(sdp); 85 } 86 87 88 static int gfs2_ail_empty_gl(struct gfs2_glock *gl) 89 { 90 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 91 struct gfs2_trans tr; 92 unsigned int revokes; 93 int ret = 0; 94 95 revokes = atomic_read(&gl->gl_ail_count); 96 97 if (!revokes) { 98 bool have_revokes; 99 bool log_in_flight; 100 101 /* 102 * We have nothing on the ail, but there could be revokes on 103 * the sdp revoke queue, in which case, we still want to flush 104 * the log and wait for it to finish. 105 * 106 * If the sdp revoke list is empty too, we might still have an 107 * io outstanding for writing revokes, so we should wait for 108 * it before returning. 109 * 110 * If none of these conditions are true, our revokes are all 111 * flushed and we can return. 112 */ 113 gfs2_log_lock(sdp); 114 have_revokes = !list_empty(&sdp->sd_log_revokes); 115 log_in_flight = atomic_read(&sdp->sd_log_in_flight); 116 gfs2_log_unlock(sdp); 117 if (have_revokes) 118 goto flush; 119 if (log_in_flight) 120 log_flush_wait(sdp); 121 return 0; 122 } 123 124 memset(&tr, 0, sizeof(tr)); 125 set_bit(TR_ONSTACK, &tr.tr_flags); 126 ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_); 127 if (ret) { 128 fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret); 129 goto flush; 130 } 131 __gfs2_ail_flush(gl, 0, revokes); 132 gfs2_trans_end(sdp); 133 134 flush: 135 if (!ret) 136 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | 137 GFS2_LFC_AIL_EMPTY_GL); 138 return ret; 139 } 140 141 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) 142 { 143 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 144 unsigned int revokes = atomic_read(&gl->gl_ail_count); 145 int ret; 146 147 if (!revokes) 148 return; 149 150 ret = gfs2_trans_begin(sdp, 0, revokes); 151 if (ret) 152 return; 153 __gfs2_ail_flush(gl, fsync, revokes); 154 gfs2_trans_end(sdp); 155 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | 156 GFS2_LFC_AIL_FLUSH); 157 } 158 159 /** 160 * gfs2_rgrp_metasync - sync out the metadata of a resource group 161 * @gl: the glock protecting the resource group 162 * 163 */ 164 165 static int gfs2_rgrp_metasync(struct gfs2_glock *gl) 166 { 167 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 168 struct address_space *metamapping = &sdp->sd_aspace; 169 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); 170 const unsigned bsize = sdp->sd_sb.sb_bsize; 171 loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; 172 loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; 173 int error; 174 175 filemap_fdatawrite_range(metamapping, start, end); 176 error = filemap_fdatawait_range(metamapping, start, end); 177 WARN_ON_ONCE(error && !gfs2_withdrawn(sdp)); 178 mapping_set_error(metamapping, error); 179 if (error) 180 gfs2_io_error(sdp); 181 return error; 182 } 183 184 /** 185 * rgrp_go_sync - sync out the metadata for this glock 186 * @gl: the glock 187 * 188 * Called when demoting or unlocking an EX glock. We must flush 189 * to disk all dirty buffers/pages relating to this glock, and must not 190 * return to caller to demote/unlock the glock until I/O is complete. 191 */ 192 193 static int rgrp_go_sync(struct gfs2_glock *gl) 194 { 195 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 196 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); 197 int error; 198 199 if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 200 return 0; 201 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); 202 203 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | 204 GFS2_LFC_RGRP_GO_SYNC); 205 error = gfs2_rgrp_metasync(gl); 206 if (!error) 207 error = gfs2_ail_empty_gl(gl); 208 gfs2_free_clones(rgd); 209 return error; 210 } 211 212 /** 213 * rgrp_go_inval - invalidate the metadata for this glock 214 * @gl: the glock 215 * @flags: 216 * 217 * We never used LM_ST_DEFERRED with resource groups, so that we 218 * should always see the metadata flag set here. 219 * 220 */ 221 222 static void rgrp_go_inval(struct gfs2_glock *gl, int flags) 223 { 224 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 225 struct address_space *mapping = &sdp->sd_aspace; 226 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); 227 const unsigned bsize = sdp->sd_sb.sb_bsize; 228 loff_t start, end; 229 230 if (!rgd) 231 return; 232 start = (rgd->rd_addr * bsize) & PAGE_MASK; 233 end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; 234 gfs2_rgrp_brelse(rgd); 235 WARN_ON_ONCE(!(flags & DIO_METADATA)); 236 truncate_inode_pages_range(mapping, start, end); 237 } 238 239 static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl, 240 const char *fs_id_buf) 241 { 242 struct gfs2_rgrpd *rgd = gl->gl_object; 243 244 if (rgd) 245 gfs2_rgrp_dump(seq, rgd, fs_id_buf); 246 } 247 248 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl) 249 { 250 struct gfs2_inode *ip; 251 252 spin_lock(&gl->gl_lockref.lock); 253 ip = gl->gl_object; 254 if (ip) 255 set_bit(GIF_GLOP_PENDING, &ip->i_flags); 256 spin_unlock(&gl->gl_lockref.lock); 257 return ip; 258 } 259 260 struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl) 261 { 262 struct gfs2_rgrpd *rgd; 263 264 spin_lock(&gl->gl_lockref.lock); 265 rgd = gl->gl_object; 266 spin_unlock(&gl->gl_lockref.lock); 267 268 return rgd; 269 } 270 271 static void gfs2_clear_glop_pending(struct gfs2_inode *ip) 272 { 273 if (!ip) 274 return; 275 276 clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags); 277 wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING); 278 } 279 280 /** 281 * gfs2_inode_metasync - sync out the metadata of an inode 282 * @gl: the glock protecting the inode 283 * 284 */ 285 int gfs2_inode_metasync(struct gfs2_glock *gl) 286 { 287 struct address_space *metamapping = gfs2_glock2aspace(gl); 288 int error; 289 290 filemap_fdatawrite(metamapping); 291 error = filemap_fdatawait(metamapping); 292 if (error) 293 gfs2_io_error(gl->gl_name.ln_sbd); 294 return error; 295 } 296 297 /** 298 * inode_go_sync - Sync the dirty metadata of an inode 299 * @gl: the glock protecting the inode 300 * 301 */ 302 303 static int inode_go_sync(struct gfs2_glock *gl) 304 { 305 struct gfs2_inode *ip = gfs2_glock2inode(gl); 306 int isreg = ip && S_ISREG(ip->i_inode.i_mode); 307 struct address_space *metamapping = gfs2_glock2aspace(gl); 308 int error = 0, ret; 309 310 if (isreg) { 311 if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) 312 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); 313 inode_dio_wait(&ip->i_inode); 314 } 315 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 316 goto out; 317 318 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); 319 320 gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | 321 GFS2_LFC_INODE_GO_SYNC); 322 filemap_fdatawrite(metamapping); 323 if (isreg) { 324 struct address_space *mapping = ip->i_inode.i_mapping; 325 filemap_fdatawrite(mapping); 326 error = filemap_fdatawait(mapping); 327 mapping_set_error(mapping, error); 328 } 329 ret = gfs2_inode_metasync(gl); 330 if (!error) 331 error = ret; 332 ret = gfs2_ail_empty_gl(gl); 333 if (!error) 334 error = ret; 335 /* 336 * Writeback of the data mapping may cause the dirty flag to be set 337 * so we have to clear it again here. 338 */ 339 smp_mb__before_atomic(); 340 clear_bit(GLF_DIRTY, &gl->gl_flags); 341 342 out: 343 gfs2_clear_glop_pending(ip); 344 return error; 345 } 346 347 /** 348 * inode_go_inval - prepare a inode glock to be released 349 * @gl: the glock 350 * @flags: 351 * 352 * Normally we invalidate everything, but if we are moving into 353 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 354 * can keep hold of the metadata, since it won't have changed. 355 * 356 */ 357 358 static void inode_go_inval(struct gfs2_glock *gl, int flags) 359 { 360 struct gfs2_inode *ip = gfs2_glock2inode(gl); 361 362 if (flags & DIO_METADATA) { 363 struct address_space *mapping = gfs2_glock2aspace(gl); 364 truncate_inode_pages(mapping, 0); 365 if (ip) { 366 set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); 367 forget_all_cached_acls(&ip->i_inode); 368 security_inode_invalidate_secctx(&ip->i_inode); 369 gfs2_dir_hash_inval(ip); 370 } 371 } 372 373 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) { 374 gfs2_log_flush(gl->gl_name.ln_sbd, NULL, 375 GFS2_LOG_HEAD_FLUSH_NORMAL | 376 GFS2_LFC_INODE_GO_INVAL); 377 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0; 378 } 379 if (ip && S_ISREG(ip->i_inode.i_mode)) 380 truncate_inode_pages(ip->i_inode.i_mapping, 0); 381 382 gfs2_clear_glop_pending(ip); 383 } 384 385 /** 386 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock 387 * @gl: the glock 388 * 389 * Returns: 1 if it's ok 390 */ 391 392 static int inode_go_demote_ok(const struct gfs2_glock *gl) 393 { 394 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 395 396 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) 397 return 0; 398 399 return 1; 400 } 401 402 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 403 { 404 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 405 const struct gfs2_dinode *str = buf; 406 struct timespec64 atime; 407 u16 height, depth; 408 umode_t mode = be32_to_cpu(str->di_mode); 409 struct inode *inode = &ip->i_inode; 410 bool is_new = inode->i_state & I_NEW; 411 412 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) 413 goto corrupt; 414 if (unlikely(!is_new && inode_wrong_type(inode, mode))) 415 goto corrupt; 416 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); 417 inode->i_mode = mode; 418 if (is_new) { 419 inode->i_rdev = 0; 420 switch (mode & S_IFMT) { 421 case S_IFBLK: 422 case S_IFCHR: 423 inode->i_rdev = MKDEV(be32_to_cpu(str->di_major), 424 be32_to_cpu(str->di_minor)); 425 break; 426 } 427 } 428 429 i_uid_write(inode, be32_to_cpu(str->di_uid)); 430 i_gid_write(inode, be32_to_cpu(str->di_gid)); 431 set_nlink(inode, be32_to_cpu(str->di_nlink)); 432 i_size_write(inode, be64_to_cpu(str->di_size)); 433 gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks)); 434 atime.tv_sec = be64_to_cpu(str->di_atime); 435 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 436 if (timespec64_compare(&inode->i_atime, &atime) < 0) 437 inode->i_atime = atime; 438 inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime); 439 inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); 440 inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime); 441 inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); 442 443 ip->i_goal = be64_to_cpu(str->di_goal_meta); 444 ip->i_generation = be64_to_cpu(str->di_generation); 445 446 ip->i_diskflags = be32_to_cpu(str->di_flags); 447 ip->i_eattr = be64_to_cpu(str->di_eattr); 448 /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ 449 gfs2_set_inode_flags(inode); 450 height = be16_to_cpu(str->di_height); 451 if (unlikely(height > sdp->sd_max_height)) 452 goto corrupt; 453 ip->i_height = (u8)height; 454 455 depth = be16_to_cpu(str->di_depth); 456 if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) 457 goto corrupt; 458 ip->i_depth = (u8)depth; 459 ip->i_entries = be32_to_cpu(str->di_entries); 460 461 if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) 462 goto corrupt; 463 464 if (S_ISREG(inode->i_mode)) 465 gfs2_set_aops(inode); 466 467 return 0; 468 corrupt: 469 gfs2_consist_inode(ip); 470 return -EIO; 471 } 472 473 /** 474 * gfs2_inode_refresh - Refresh the incore copy of the dinode 475 * @ip: The GFS2 inode 476 * 477 * Returns: errno 478 */ 479 480 int gfs2_inode_refresh(struct gfs2_inode *ip) 481 { 482 struct buffer_head *dibh; 483 int error; 484 485 error = gfs2_meta_inode_buffer(ip, &dibh); 486 if (error) 487 return error; 488 489 error = gfs2_dinode_in(ip, dibh->b_data); 490 brelse(dibh); 491 return error; 492 } 493 494 /** 495 * inode_go_instantiate - read in an inode if necessary 496 * @gh: The glock holder 497 * 498 * Returns: errno 499 */ 500 501 static int inode_go_instantiate(struct gfs2_glock *gl) 502 { 503 struct gfs2_inode *ip = gl->gl_object; 504 505 if (!ip) /* no inode to populate - read it in later */ 506 return 0; 507 508 return gfs2_inode_refresh(ip); 509 } 510 511 static int inode_go_held(struct gfs2_holder *gh) 512 { 513 struct gfs2_glock *gl = gh->gh_gl; 514 struct gfs2_inode *ip = gl->gl_object; 515 int error = 0; 516 517 if (!ip) /* no inode to populate - read it in later */ 518 return 0; 519 520 if (gh->gh_state != LM_ST_DEFERRED) 521 inode_dio_wait(&ip->i_inode); 522 523 if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && 524 (gl->gl_state == LM_ST_EXCLUSIVE) && 525 (gh->gh_state == LM_ST_EXCLUSIVE)) 526 error = gfs2_truncatei_resume(ip); 527 528 return error; 529 } 530 531 /** 532 * inode_go_dump - print information about an inode 533 * @seq: The iterator 534 * @gl: The glock 535 * @fs_id_buf: file system id (may be empty) 536 * 537 */ 538 539 static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl, 540 const char *fs_id_buf) 541 { 542 struct gfs2_inode *ip = gl->gl_object; 543 struct inode *inode; 544 unsigned long nrpages; 545 546 if (ip == NULL) 547 return; 548 549 inode = &ip->i_inode; 550 xa_lock_irq(&inode->i_data.i_pages); 551 nrpages = inode->i_data.nrpages; 552 xa_unlock_irq(&inode->i_data.i_pages); 553 554 gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu " 555 "p:%lu\n", fs_id_buf, 556 (unsigned long long)ip->i_no_formal_ino, 557 (unsigned long long)ip->i_no_addr, 558 IF2DT(ip->i_inode.i_mode), ip->i_flags, 559 (unsigned int)ip->i_diskflags, 560 (unsigned long long)i_size_read(inode), nrpages); 561 } 562 563 /** 564 * freeze_go_sync - promote/demote the freeze glock 565 * @gl: the glock 566 */ 567 568 static int freeze_go_sync(struct gfs2_glock *gl) 569 { 570 int error = 0; 571 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 572 573 /* 574 * We need to check gl_state == LM_ST_SHARED here and not gl_req == 575 * LM_ST_EXCLUSIVE. That's because when any node does a freeze, 576 * all the nodes should have the freeze glock in SH mode and they all 577 * call do_xmote: One for EX and the others for UN. They ALL must 578 * freeze locally, and they ALL must queue freeze work. The freeze_work 579 * calls freeze_func, which tries to reacquire the freeze glock in SH, 580 * effectively waiting for the thaw on the node who holds it in EX. 581 * Once thawed, the work func acquires the freeze glock in 582 * SH and everybody goes back to thawed. 583 */ 584 if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) && 585 !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) { 586 atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE); 587 error = freeze_super(sdp->sd_vfs); 588 if (error) { 589 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", 590 error); 591 if (gfs2_withdrawn(sdp)) { 592 atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN); 593 return 0; 594 } 595 gfs2_assert_withdraw(sdp, 0); 596 } 597 queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work); 598 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) 599 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE | 600 GFS2_LFC_FREEZE_GO_SYNC); 601 else /* read-only mounts */ 602 atomic_set(&sdp->sd_freeze_state, SFS_FROZEN); 603 } 604 return 0; 605 } 606 607 /** 608 * freeze_go_xmote_bh - After promoting/demoting the freeze glock 609 * @gl: the glock 610 */ 611 static int freeze_go_xmote_bh(struct gfs2_glock *gl) 612 { 613 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 614 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 615 struct gfs2_glock *j_gl = ip->i_gl; 616 struct gfs2_log_header_host head; 617 int error; 618 619 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 620 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 621 622 error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); 623 if (gfs2_assert_withdraw_delayed(sdp, !error)) 624 return error; 625 if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags & 626 GFS2_LOG_HEAD_UNMOUNT)) 627 return -EIO; 628 sdp->sd_log_sequence = head.lh_sequence + 1; 629 gfs2_log_pointers_init(sdp, head.lh_blkno); 630 } 631 return 0; 632 } 633 634 /** 635 * freeze_go_demote_ok 636 * @gl: the glock 637 * 638 * Always returns 0 639 */ 640 641 static int freeze_go_demote_ok(const struct gfs2_glock *gl) 642 { 643 return 0; 644 } 645 646 /** 647 * iopen_go_callback - schedule the dcache entry for the inode to be deleted 648 * @gl: the glock 649 * @remote: true if this came from a different cluster node 650 * 651 * gl_lockref.lock lock is held while calling this 652 */ 653 static void iopen_go_callback(struct gfs2_glock *gl, bool remote) 654 { 655 struct gfs2_inode *ip = gl->gl_object; 656 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 657 658 if (!remote || sb_rdonly(sdp->sd_vfs) || 659 test_bit(SDF_DEACTIVATING, &sdp->sd_flags)) 660 return; 661 662 if (gl->gl_demote_state == LM_ST_UNLOCKED && 663 gl->gl_state == LM_ST_SHARED && ip) { 664 gl->gl_lockref.count++; 665 if (!gfs2_queue_try_to_evict(gl)) 666 gl->gl_lockref.count--; 667 } 668 } 669 670 /** 671 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it 672 * @gl: glock being freed 673 * 674 * For now, this is only used for the journal inode glock. In withdraw 675 * situations, we need to wait for the glock to be freed so that we know 676 * other nodes may proceed with recovery / journal replay. 677 */ 678 static void inode_go_free(struct gfs2_glock *gl) 679 { 680 /* Note that we cannot reference gl_object because it's already set 681 * to NULL by this point in its lifecycle. */ 682 if (!test_bit(GLF_FREEING, &gl->gl_flags)) 683 return; 684 clear_bit_unlock(GLF_FREEING, &gl->gl_flags); 685 wake_up_bit(&gl->gl_flags, GLF_FREEING); 686 } 687 688 /** 689 * nondisk_go_callback - used to signal when a node did a withdraw 690 * @gl: the nondisk glock 691 * @remote: true if this came from a different cluster node 692 * 693 */ 694 static void nondisk_go_callback(struct gfs2_glock *gl, bool remote) 695 { 696 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; 697 698 /* Ignore the callback unless it's from another node, and it's the 699 live lock. */ 700 if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK) 701 return; 702 703 /* First order of business is to cancel the demote request. We don't 704 * really want to demote a nondisk glock. At best it's just to inform 705 * us of another node's withdraw. We'll keep it in SH mode. */ 706 clear_bit(GLF_DEMOTE, &gl->gl_flags); 707 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); 708 709 /* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */ 710 if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || 711 test_bit(SDF_WITHDRAWN, &sdp->sd_flags) || 712 test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) 713 return; 714 715 /* We only care when a node wants us to unlock, because that means 716 * they want a journal recovered. */ 717 if (gl->gl_demote_state != LM_ST_UNLOCKED) 718 return; 719 720 if (sdp->sd_args.ar_spectator) { 721 fs_warn(sdp, "Spectator node cannot recover journals.\n"); 722 return; 723 } 724 725 fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n"); 726 set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags); 727 /* 728 * We can't call remote_withdraw directly here or gfs2_recover_journal 729 * because this is called from the glock unlock function and the 730 * remote_withdraw needs to enqueue and dequeue the same "live" glock 731 * we were called from. So we queue it to the control work queue in 732 * lock_dlm. 733 */ 734 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0); 735 } 736 737 const struct gfs2_glock_operations gfs2_meta_glops = { 738 .go_type = LM_TYPE_META, 739 .go_flags = GLOF_NONDISK, 740 }; 741 742 const struct gfs2_glock_operations gfs2_inode_glops = { 743 .go_sync = inode_go_sync, 744 .go_inval = inode_go_inval, 745 .go_demote_ok = inode_go_demote_ok, 746 .go_instantiate = inode_go_instantiate, 747 .go_held = inode_go_held, 748 .go_dump = inode_go_dump, 749 .go_type = LM_TYPE_INODE, 750 .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB, 751 .go_free = inode_go_free, 752 }; 753 754 const struct gfs2_glock_operations gfs2_rgrp_glops = { 755 .go_sync = rgrp_go_sync, 756 .go_inval = rgrp_go_inval, 757 .go_instantiate = gfs2_rgrp_go_instantiate, 758 .go_dump = gfs2_rgrp_go_dump, 759 .go_type = LM_TYPE_RGRP, 760 .go_flags = GLOF_LVB, 761 }; 762 763 const struct gfs2_glock_operations gfs2_freeze_glops = { 764 .go_sync = freeze_go_sync, 765 .go_xmote_bh = freeze_go_xmote_bh, 766 .go_demote_ok = freeze_go_demote_ok, 767 .go_type = LM_TYPE_NONDISK, 768 .go_flags = GLOF_NONDISK, 769 }; 770 771 const struct gfs2_glock_operations gfs2_iopen_glops = { 772 .go_type = LM_TYPE_IOPEN, 773 .go_callback = iopen_go_callback, 774 .go_dump = inode_go_dump, 775 .go_flags = GLOF_LRU | GLOF_NONDISK, 776 .go_subclass = 1, 777 }; 778 779 const struct gfs2_glock_operations gfs2_flock_glops = { 780 .go_type = LM_TYPE_FLOCK, 781 .go_flags = GLOF_LRU | GLOF_NONDISK, 782 }; 783 784 const struct gfs2_glock_operations gfs2_nondisk_glops = { 785 .go_type = LM_TYPE_NONDISK, 786 .go_flags = GLOF_NONDISK, 787 .go_callback = nondisk_go_callback, 788 }; 789 790 const struct gfs2_glock_operations gfs2_quota_glops = { 791 .go_type = LM_TYPE_QUOTA, 792 .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK, 793 }; 794 795 const struct gfs2_glock_operations gfs2_journal_glops = { 796 .go_type = LM_TYPE_JOURNAL, 797 .go_flags = GLOF_NONDISK, 798 }; 799 800 const struct gfs2_glock_operations *gfs2_glops_list[] = { 801 [LM_TYPE_META] = &gfs2_meta_glops, 802 [LM_TYPE_INODE] = &gfs2_inode_glops, 803 [LM_TYPE_RGRP] = &gfs2_rgrp_glops, 804 [LM_TYPE_IOPEN] = &gfs2_iopen_glops, 805 [LM_TYPE_FLOCK] = &gfs2_flock_glops, 806 [LM_TYPE_NONDISK] = &gfs2_nondisk_glops, 807 [LM_TYPE_QUOTA] = &gfs2_quota_glops, 808 [LM_TYPE_JOURNAL] = &gfs2_journal_glops, 809 }; 810 811