/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of revokes to add to the transaction
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin();
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}
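/*
 * A sizing note for gfs2_ail_flush() below (illustrative numbers,
 * assuming the default 4KiB block size and the usual on-disk header
 * sizes): revokes are u64 block numbers packed into journal blocks.
 * The first block carries a struct gfs2_log_descriptor and each
 * continuation block a struct gfs2_meta_header, so the first block
 * holds (4096 - sizeof(descriptor)) / 8 revokes, roughly 500, and each
 * further block slightly more.  The while loop below therefore grows
 * max_revokes in continuation-block units until it covers the glock's
 * entire AIL count.
 */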
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}
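/*
 * Ordering note, summarizing rgrp_go_sync() above: the journal is
 * flushed before the resource group pages are written back, so the
 * journaled copies of this glock's blocks are safely on disk before
 * the in-place writes start; gfs2_ail_empty_gl() then issues revokes
 * so that journal recovery will not replay those blocks afterwards.
 */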
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
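/*
 * Note: an inode glock covers two address spaces.  The "metamapping"
 * returned by gfs2_glock2aspace() caches the inode's metadata blocks,
 * while ip->i_inode.i_mapping holds the page cache of regular file
 * data; inode_go_sync() above writes back and waits on both before
 * allowing the glock to be demoted.
 */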
/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL, NORMAL_FLUSH);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}
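/*
 * Reading inode_go_demote_ok() above: the jindex and rindex inodes are
 * never demoted, since their glocks are needed for the life of the
 * mount, and a glock whose holder list has more than one entry is kept
 * as well (the list check tests whether a second holder follows the
 * first).
 */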
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say.  When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
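/*
 * Note on gfs2_dinode_in() above: atime is the one timestamp merged via
 * timespec_compare() rather than copied, presumably because atime is
 * updated locally and flushed lazily, so the in-core value may already
 * be newer than the on-disk copy and must not be rolled back.
 */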
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock protecting the inode
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to unlock the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};
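/*
 * The operation tables in this file wire each lock type to its
 * callbacks: .go_sync writes out dirty state before a demote,
 * .go_inval discards cached state once protection is lost, and the
 * GLOF_ flags request extra glock features (GLOF_ASPACE: a private
 * metadata address space, GLOF_LVB: a DLM lock value block,
 * GLOF_LRU: eligible for reclaim via the glock LRU list).
 */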
const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
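/*
 * Illustrative only (a sketch, not part of this file's API): a caller
 * holding just a numeric lock type, e.g. one decoded from a lock name,
 * could resolve the matching operations from gfs2_glops_list with a
 * bounds check:
 *
 *	const struct gfs2_glock_operations *glops = NULL;
 *
 *	if (ln_type < ARRAY_SIZE(gfs2_glops_list))
 *		glops = gfs2_glops_list[ln_type];
 *	if (glops == NULL)
 *		return -EINVAL;
 */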