// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/log2.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags.f);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       glock_type(gl), glock_number(gl),
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	spin_lock(&sdp->sd_log_lock);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	spin_unlock(&sdp->sd_log_lock);
}
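/*
 * Note on the AIL/revoke interplay (informal sketch, not compiled code):
 * a buffer that has been journaled but not yet written back in place sits
 * on the AIL; a revoke record tells journal recovery to ignore the stale
 * journal copy. A caller therefore pairs a transaction sized for the
 * revokes with the flush, roughly:
 *
 *	ret = gfs2_trans_begin(sdp, 0, revokes);
 *	if (!ret) {
 *		__gfs2_ail_flush(gl, false, revokes);
 *		gfs2_trans_end(sdp);
 *	}
 *
 * which is the shape of gfs2_ail_flush() below.
 */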
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		spin_lock(&sdp->sd_log_lock);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		spin_unlock(&sdp->sd_log_lock);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct address_space *metamapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 *
 * Returns: errno
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}
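/*
 * Ordering sketch for resource group demotion (illustrative only):
 *
 *	rgrp_go_sync(gl)		flush journal + rgrp metadata
 *	rgrp_go_inval(gl, DIO_METADATA)	drop the now-clean cached pages
 *	DLM demote			other nodes may now read the rgrp
 *
 * If the invalidate ran before the sync completed, another node could
 * read stale resource group data from disk.
 */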
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct address_space *mapping = gfs2_aspace(sdp);
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, start, end);
}

static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(glock_sbd(gl));
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(glock_sbd(gl), gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
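/*
 * GIF_GLOP_PENDING handshake (informal summary): gfs2_glock2inode() sets
 * the bit for the duration of a glock operation, and
 * gfs2_clear_glop_pending() clears it and wakes waiters. Code tearing
 * down an inode can then wait out any in-flight glock operation, e.g.:
 *
 *	wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
 *
 * before detaching the inode from gl->gl_object.
 */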
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(glock_sbd(gl), !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(glock_sbd(gl)->sd_rindex)) {
		gfs2_log_flush(glock_sbd(gl), NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		glock_sbd(gl)->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode_state_read_once(inode) & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
	    depth < ilog2(sdp->sd_hash_ptrs)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (!S_ISDIR(inode->i_mode) && (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}
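/*
 * All on-disk dinode fields are big-endian, which is why every load above
 * goes through a be*_to_cpu() conversion. As a quick illustration (sketch,
 * not part of this file), writing a field back uses the inverse helpers:
 *
 *	str->di_size = cpu_to_be64(i_size_read(inode));
 *
 * See gfs2_dinode_out() for the real counterpart.
 */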
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

static int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_glock *io_gl;
	int error;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	error = gfs2_inode_refresh(ip);
	if (error)
		return error;
	io_gl = ip->i_iopen_gh.gh_gl;
	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
	return 0;
}

static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode),
		       inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()). But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}
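/*
 * The s_umount/s_active dance above follows the usual VFS pattern for
 * taking an active superblock reference from a context that may race
 * with unmount (informal sketch):
 *
 *	if (down_read_trylock(&sb->s_umount)) {
 *		atomic_inc(&sb->s_active);	// sb now pinned
 *		up_read(&sb->s_umount);
 *		...
 *		deactivate_super(sb);		// drop the reference
 *	}
 *
 * Here the reference is handed off to sd_freeze_work, which is expected
 * to call deactivate_super() when it finishes; we only drop it directly
 * if the work item was already queued.
 */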
/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 *
 * Returns: errno
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = glock_sbd(gl);
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (gfs2_assert_withdraw(sdp, !error))
			return error;
		if (gfs2_assert_withdraw(sdp, head.lh_flags &
					 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		gfs2_log_pointers_init(sdp, &head);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl->gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = glock_sbd(gl);

	if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}
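/*
 * The tables below bind each glock type to its operations. Fields that
 * are left out default to NULL, which the glock core treats as "no
 * callback for this event". For example, gfs2_meta_glops supplies only
 * .go_type, so meta glocks take the generic lock/unlock paths with no
 * type-specific sync or invalidate work.
 */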
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};