/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

struct workqueue_struct *gfs2_freeze_wq;

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the number of revokes we may add to the transaction
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

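	/*
	 * Each revoke is a 64-bit block number. The first revoke block in
	 * the log carries a struct gfs2_log_descriptor header, while each
	 * continuation block only needs a struct gfs2_meta_header and so
	 * holds a few more entries. Grow max_revokes one block's worth at
	 * a time until it covers everything currently on the AIL.
	 */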
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to the caller to demote/unlock the glock until I/O is
 * complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, NORMAL_FLUSH);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

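	/*
	 * Flush the journal first so that metadata protected by this glock
	 * is committed to the log before the in-place blocks are written
	 * back; then write and wait on the data and metadata mappings, and
	 * finally drop the now-synced buffers from the AIL.
	 */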
	gfs2_log_flush(gl->gl_sbd, gl, NORMAL_FLUSH);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL, NORMAL_FLUSH);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

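	/*
	 * If more than one holder is queued on this glock there is still
	 * local interest in it, so do not allow it to be demoted yet.
	 */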
	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

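/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 *
 * All fields are converted from big-endian disk order to host order.
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */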
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
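		/*
		 * An interrupted truncate is still pending on this inode.
		 * Queue it on sd_trunc_list and wake the quota daemon,
		 * which will finish the truncate on our behalf.
		 */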
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock protecting the inode
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			printk(KERN_INFO "GFS2: couldn't freeze filesystem: %d\n", error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, FREEZE_FLUSH);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check to see if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};