1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/bio.h> 10 #include <linux/sched/signal.h> 11 #include <linux/slab.h> 12 #include <linux/spinlock.h> 13 #include <linux/completion.h> 14 #include <linux/buffer_head.h> 15 #include <linux/statfs.h> 16 #include <linux/seq_file.h> 17 #include <linux/mount.h> 18 #include <linux/kthread.h> 19 #include <linux/delay.h> 20 #include <linux/gfs2_ondisk.h> 21 #include <linux/crc32.h> 22 #include <linux/time.h> 23 #include <linux/wait.h> 24 #include <linux/writeback.h> 25 #include <linux/backing-dev.h> 26 #include <linux/kernel.h> 27 28 #include "gfs2.h" 29 #include "incore.h" 30 #include "bmap.h" 31 #include "dir.h" 32 #include "glock.h" 33 #include "glops.h" 34 #include "inode.h" 35 #include "log.h" 36 #include "meta_io.h" 37 #include "quota.h" 38 #include "recovery.h" 39 #include "rgrp.h" 40 #include "super.h" 41 #include "trans.h" 42 #include "util.h" 43 #include "sys.h" 44 #include "xattr.h" 45 #include "lops.h" 46 47 enum evict_behavior { 48 EVICT_SHOULD_DELETE, 49 EVICT_SHOULD_SKIP_DELETE, 50 EVICT_SHOULD_DEFER_DELETE, 51 }; 52 53 /** 54 * gfs2_jindex_free - Clear all the journal index information 55 * @sdp: The GFS2 superblock 56 * 57 */ 58 59 void gfs2_jindex_free(struct gfs2_sbd *sdp) 60 { 61 struct list_head list; 62 struct gfs2_jdesc *jd; 63 64 spin_lock(&sdp->sd_jindex_spin); 65 list_add(&list, &sdp->sd_jindex_list); 66 list_del_init(&sdp->sd_jindex_list); 67 sdp->sd_journals = 0; 68 spin_unlock(&sdp->sd_jindex_spin); 69 70 down_write(&sdp->sd_log_flush_lock); 71 sdp->sd_jdesc = NULL; 72 up_write(&sdp->sd_log_flush_lock); 73 74 while (!list_empty(&list)) { 75 jd = list_first_entry(&list, struct gfs2_jdesc, jd_list); 76 BUG_ON(jd->jd_log_bio); 77 gfs2_free_journal_extents(jd); 78 list_del(&jd->jd_list); 79 iput(jd->jd_inode); 80 jd->jd_inode = NULL; 81 kfree(jd); 82 } 83 } 84 85 static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid) 86 { 87 struct gfs2_jdesc *jd; 88 89 list_for_each_entry(jd, head, jd_list) { 90 if (jd->jd_jid == jid) 91 return jd; 92 } 93 return NULL; 94 } 95 96 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid) 97 { 98 struct gfs2_jdesc *jd; 99 100 spin_lock(&sdp->sd_jindex_spin); 101 jd = jdesc_find_i(&sdp->sd_jindex_list, jid); 102 spin_unlock(&sdp->sd_jindex_spin); 103 104 return jd; 105 } 106 107 int gfs2_jdesc_check(struct gfs2_jdesc *jd) 108 { 109 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 110 struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); 111 u64 size = i_size_read(jd->jd_inode); 112 113 if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30))) 114 return -EIO; 115 116 jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift; 117 118 if (gfs2_write_alloc_required(ip, 0, size)) { 119 gfs2_consist_inode(ip); 120 return -EIO; 121 } 122 123 return 0; 124 } 125 126 /** 127 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one 128 * @sdp: the filesystem 129 * 130 * Returns: errno 131 */ 132 133 int gfs2_make_fs_rw(struct gfs2_sbd *sdp) 134 { 135 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); 136 struct gfs2_glock *j_gl = ip->i_gl; 137 int error; 138 139 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); 140 if (gfs2_withdrawn(sdp)) 141 return -EIO; 142 143 if (sdp->sd_log_sequence == 0) { 144 fs_err(sdp, "unknown status of our own journal jid %d", 145 
sdp->sd_lockstruct.ls_jid); 146 return -EIO; 147 } 148 149 error = gfs2_quota_init(sdp); 150 if (!error && gfs2_withdrawn(sdp)) { 151 gfs2_quota_cleanup(sdp); 152 error = -EIO; 153 } 154 if (!error) 155 set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 156 return error; 157 } 158 159 void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf) 160 { 161 const struct gfs2_statfs_change *str = buf; 162 163 sc->sc_total = be64_to_cpu(str->sc_total); 164 sc->sc_free = be64_to_cpu(str->sc_free); 165 sc->sc_dinodes = be64_to_cpu(str->sc_dinodes); 166 } 167 168 void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf) 169 { 170 struct gfs2_statfs_change *str = buf; 171 172 str->sc_total = cpu_to_be64(sc->sc_total); 173 str->sc_free = cpu_to_be64(sc->sc_free); 174 str->sc_dinodes = cpu_to_be64(sc->sc_dinodes); 175 } 176 177 int gfs2_statfs_init(struct gfs2_sbd *sdp) 178 { 179 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 180 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 181 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 182 struct buffer_head *m_bh; 183 struct gfs2_holder gh; 184 int error; 185 186 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, 187 &gh); 188 if (error) 189 return error; 190 191 error = gfs2_meta_inode_buffer(m_ip, &m_bh); 192 if (error) 193 goto out; 194 195 if (sdp->sd_args.ar_spectator) { 196 spin_lock(&sdp->sd_statfs_spin); 197 gfs2_statfs_change_in(m_sc, m_bh->b_data + 198 sizeof(struct gfs2_dinode)); 199 spin_unlock(&sdp->sd_statfs_spin); 200 } else { 201 spin_lock(&sdp->sd_statfs_spin); 202 gfs2_statfs_change_in(m_sc, m_bh->b_data + 203 sizeof(struct gfs2_dinode)); 204 gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data + 205 sizeof(struct gfs2_dinode)); 206 spin_unlock(&sdp->sd_statfs_spin); 207 208 } 209 210 brelse(m_bh); 211 out: 212 gfs2_glock_dq_uninit(&gh); 213 return 0; 214 } 215 216 void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free, 217 s64 dinodes) 218 { 219 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 220 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 221 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 222 s64 x, y; 223 int need_sync = 0; 224 225 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh); 226 227 spin_lock(&sdp->sd_statfs_spin); 228 l_sc->sc_total += total; 229 l_sc->sc_free += free; 230 l_sc->sc_dinodes += dinodes; 231 gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data + 232 sizeof(struct gfs2_dinode)); 233 if (sdp->sd_args.ar_statfs_percent) { 234 x = 100 * l_sc->sc_free; 235 y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent; 236 if (x >= y || x <= -y) 237 need_sync = 1; 238 } 239 spin_unlock(&sdp->sd_statfs_spin); 240 241 if (need_sync) 242 gfs2_wake_up_statfs(sdp); 243 } 244 245 void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh) 246 { 247 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 248 struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode); 249 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 250 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 251 252 gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh); 253 gfs2_trans_add_meta(m_ip->i_gl, m_bh); 254 255 spin_lock(&sdp->sd_statfs_spin); 256 m_sc->sc_total += l_sc->sc_total; 257 m_sc->sc_free += l_sc->sc_free; 258 m_sc->sc_dinodes += l_sc->sc_dinodes; 259 memset(l_sc, 0, sizeof(struct gfs2_statfs_change)); 260 memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode), 261 0, sizeof(struct gfs2_statfs_change)); 262 
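	/*
	 * At this point the local statfs change file has been folded into
	 * the master counters and zeroed, both in memory and in its buffer;
	 * the store below writes the updated master counters back into the
	 * master statfs inode's buffer.
	 */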
gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode)); 263 spin_unlock(&sdp->sd_statfs_spin); 264 } 265 266 int gfs2_statfs_sync(struct super_block *sb, int type) 267 { 268 struct gfs2_sbd *sdp = sb->s_fs_info; 269 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 270 struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 271 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 272 struct gfs2_holder gh; 273 struct buffer_head *m_bh; 274 int error; 275 276 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, 277 &gh); 278 if (error) 279 goto out; 280 281 error = gfs2_meta_inode_buffer(m_ip, &m_bh); 282 if (error) 283 goto out_unlock; 284 285 spin_lock(&sdp->sd_statfs_spin); 286 gfs2_statfs_change_in(m_sc, m_bh->b_data + 287 sizeof(struct gfs2_dinode)); 288 if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) { 289 spin_unlock(&sdp->sd_statfs_spin); 290 goto out_bh; 291 } 292 spin_unlock(&sdp->sd_statfs_spin); 293 294 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0); 295 if (error) 296 goto out_bh; 297 298 update_statfs(sdp, m_bh); 299 sdp->sd_statfs_force_sync = 0; 300 301 gfs2_trans_end(sdp); 302 303 out_bh: 304 brelse(m_bh); 305 out_unlock: 306 gfs2_glock_dq_uninit(&gh); 307 out: 308 return error; 309 } 310 311 struct lfcc { 312 struct list_head list; 313 struct gfs2_holder gh; 314 }; 315 316 /** 317 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all 318 * journals are clean 319 * @sdp: the file system 320 * 321 * Returns: errno 322 */ 323 324 static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) 325 { 326 struct gfs2_inode *ip; 327 struct gfs2_jdesc *jd; 328 struct lfcc *lfcc; 329 LIST_HEAD(list); 330 struct gfs2_log_header_host lh; 331 int error, error2; 332 333 /* 334 * Grab all the journal glocks in SH mode. We are *probably* doing 335 * that to prevent recovery. 
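	 *
	 * Holding each journal glock in SH should be enough to keep another
	 * node's journal recovery (which needs these glocks exclusively)
	 * from replaying a journal while we check below that every journal
	 * is clean.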
	 */

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	gfs2_freeze_unlock(sdp);

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_RECOVER | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto relock_shared;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (!error)
		goto out; /* success */

	gfs2_freeze_unlock(sdp);

relock_shared:
	error2 = gfs2_freeze_lock_shared(sdp);
	gfs2_assert_withdraw(sdp, !error2);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode_get_atime_sec(inode));
	str->di_mtime = cpu_to_be64(inode_get_mtime_sec(inode));
	str->di_ctime = cpu_to_be64(inode_get_ctime_sec(inode));

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode_get_atime_nsec(inode));
	str->di_mtime_nsec = cpu_to_be32(inode_get_mtime_nsec(inode));
	str->di_ctime_nsec = cpu_to_be32(inode_get_ctime_nsec(inode));
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and freeze glock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	/* This can only happen during incomplete inode creation.
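	 * The inode glock is only attached once the inode has been fully
	 * created, so an inode without a glock has nothing on disk that
	 * could be updated here and is simply skipped.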
*/ 493 if (unlikely(!ip->i_gl)) 494 return; 495 496 if (gfs2_withdrawn(sdp)) 497 return; 498 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 499 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 500 if (ret) { 501 fs_err(sdp, "dirty_inode: glock %d\n", ret); 502 gfs2_dump_glock(NULL, ip->i_gl, true); 503 return; 504 } 505 need_unlock = 1; 506 } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) 507 return; 508 509 if (current->journal_info == NULL) { 510 ret = gfs2_trans_begin(sdp, RES_DINODE, 0); 511 if (ret) { 512 fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret); 513 goto out; 514 } 515 need_endtrans = 1; 516 } 517 518 ret = gfs2_meta_inode_buffer(ip, &bh); 519 if (ret == 0) { 520 gfs2_trans_add_meta(ip->i_gl, bh); 521 gfs2_dinode_out(ip, bh->b_data); 522 brelse(bh); 523 } 524 525 if (need_endtrans) 526 gfs2_trans_end(sdp); 527 out: 528 if (need_unlock) 529 gfs2_glock_dq_uninit(&gh); 530 } 531 532 /** 533 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one 534 * @sdp: the filesystem 535 * 536 * Returns: errno 537 */ 538 539 void gfs2_make_fs_ro(struct gfs2_sbd *sdp) 540 { 541 int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); 542 543 if (!test_bit(SDF_KILL, &sdp->sd_flags)) 544 gfs2_flush_delete_work(sdp); 545 546 gfs2_destroy_threads(sdp); 547 548 if (log_write_allowed) { 549 gfs2_quota_sync(sdp->sd_vfs, 0); 550 gfs2_statfs_sync(sdp->sd_vfs, 0); 551 552 /* We do two log flushes here. The first one commits dirty inodes 553 * and rgrps to the journal, but queues up revokes to the ail list. 554 * The second flush writes out and removes the revokes. 555 * 556 * The first must be done before the FLUSH_SHUTDOWN code 557 * clears the LIVE flag, otherwise it will not be able to start 558 * a transaction to write its revokes, and the error will cause 559 * a withdraw of the file system. 
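		 *
		 * After the second flush we only wait a bounded time (up to
		 * five seconds) for the log to drain, and warn rather than
		 * block unmount forever if it is still not empty.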
*/ 560 gfs2_log_flush(sdp, NULL, GFS2_LFC_MAKE_FS_RO); 561 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN | 562 GFS2_LFC_MAKE_FS_RO); 563 wait_event_timeout(sdp->sd_log_waitq, 564 gfs2_log_is_empty(sdp), 565 HZ * 5); 566 gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp)); 567 } 568 gfs2_quota_cleanup(sdp); 569 } 570 571 /** 572 * gfs2_put_super - Unmount the filesystem 573 * @sb: The VFS superblock 574 * 575 */ 576 577 static void gfs2_put_super(struct super_block *sb) 578 { 579 struct gfs2_sbd *sdp = sb->s_fs_info; 580 struct gfs2_jdesc *jd; 581 582 /* No more recovery requests */ 583 set_bit(SDF_NORECOVERY, &sdp->sd_flags); 584 smp_mb(); 585 586 /* Wait on outstanding recovery */ 587 restart: 588 spin_lock(&sdp->sd_jindex_spin); 589 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 590 if (!test_bit(JDF_RECOVERY, &jd->jd_flags)) 591 continue; 592 spin_unlock(&sdp->sd_jindex_spin); 593 wait_on_bit(&jd->jd_flags, JDF_RECOVERY, 594 TASK_UNINTERRUPTIBLE); 595 goto restart; 596 } 597 spin_unlock(&sdp->sd_jindex_spin); 598 599 /* Wait for withdraw to complete */ 600 flush_work(&sdp->sd_withdraw_work); 601 602 if (!sb_rdonly(sb)) 603 gfs2_make_fs_ro(sdp); 604 else { 605 if (gfs2_withdrawn(sdp)) 606 gfs2_destroy_threads(sdp); 607 608 gfs2_quota_cleanup(sdp); 609 } 610 611 /* At this point, we're through modifying the disk */ 612 613 /* Release stuff */ 614 615 gfs2_freeze_unlock(sdp); 616 617 iput(sdp->sd_jindex); 618 iput(sdp->sd_statfs_inode); 619 iput(sdp->sd_rindex); 620 iput(sdp->sd_quota_inode); 621 622 gfs2_glock_put(sdp->sd_rename_gl); 623 gfs2_glock_put(sdp->sd_freeze_gl); 624 625 if (!sdp->sd_args.ar_spectator) { 626 if (gfs2_holder_initialized(&sdp->sd_journal_gh)) 627 gfs2_glock_dq_uninit(&sdp->sd_journal_gh); 628 if (gfs2_holder_initialized(&sdp->sd_jinode_gh)) 629 gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); 630 brelse(sdp->sd_sc_bh); 631 gfs2_glock_dq_uninit(&sdp->sd_sc_gh); 632 gfs2_glock_dq_uninit(&sdp->sd_qc_gh); 633 free_local_statfs_inodes(sdp); 634 iput(sdp->sd_qc_inode); 635 } 636 637 gfs2_glock_dq_uninit(&sdp->sd_live_gh); 638 gfs2_clear_rgrpd(sdp); 639 gfs2_jindex_free(sdp); 640 /* Take apart glock structures and buffer lists */ 641 gfs2_gl_hash_clear(sdp); 642 iput(sdp->sd_inode); 643 gfs2_delete_debugfs_file(sdp); 644 645 gfs2_sys_fs_del(sdp); 646 free_sbd(sdp); 647 } 648 649 /** 650 * gfs2_sync_fs - sync the filesystem 651 * @sb: the superblock 652 * @wait: true to wait for completion 653 * 654 * Flushes the log to disk. 
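 *
 * Quota changes are always synced; the journal itself is only flushed
 * when @wait is set. Any error previously recorded against the log is
 * returned.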
655 */ 656 657 static int gfs2_sync_fs(struct super_block *sb, int wait) 658 { 659 struct gfs2_sbd *sdp = sb->s_fs_info; 660 661 gfs2_quota_sync(sb, -1); 662 if (wait) 663 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | 664 GFS2_LFC_SYNC_FS); 665 return sdp->sd_log_error; 666 } 667 668 static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner) 669 { 670 struct super_block *sb = sdp->sd_vfs; 671 int error; 672 673 error = gfs2_freeze_lock_shared(sdp); 674 if (error) 675 goto fail; 676 error = thaw_super(sb, who, freeze_owner); 677 if (!error) 678 return 0; 679 680 fail: 681 fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n", error); 682 gfs2_assert_withdraw(sdp, 0); 683 return error; 684 } 685 686 void gfs2_freeze_func(struct work_struct *work) 687 { 688 struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work); 689 struct super_block *sb = sdp->sd_vfs; 690 int error; 691 692 mutex_lock(&sdp->sd_freeze_mutex); 693 error = -EBUSY; 694 if (test_bit(SDF_FROZEN, &sdp->sd_flags)) 695 goto freeze_failed; 696 697 error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); 698 if (error) 699 goto freeze_failed; 700 701 gfs2_freeze_unlock(sdp); 702 set_bit(SDF_FROZEN, &sdp->sd_flags); 703 704 error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL); 705 if (error) 706 goto out; 707 708 clear_bit(SDF_FROZEN, &sdp->sd_flags); 709 goto out; 710 711 freeze_failed: 712 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error); 713 714 out: 715 mutex_unlock(&sdp->sd_freeze_mutex); 716 deactivate_super(sb); 717 } 718 719 /** 720 * gfs2_freeze_super - prevent further writes to the filesystem 721 * @sb: the VFS structure for the filesystem 722 * @who: freeze flags 723 * @freeze_owner: owner of the freeze 724 * 725 */ 726 727 static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who, 728 const void *freeze_owner) 729 { 730 struct gfs2_sbd *sdp = sb->s_fs_info; 731 int error; 732 733 if (!mutex_trylock(&sdp->sd_freeze_mutex)) 734 return -EBUSY; 735 if (test_bit(SDF_FROZEN, &sdp->sd_flags)) { 736 mutex_unlock(&sdp->sd_freeze_mutex); 737 return -EBUSY; 738 } 739 740 for (;;) { 741 error = freeze_super(sb, who, freeze_owner); 742 if (error) { 743 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", 744 error); 745 goto out; 746 } 747 748 error = gfs2_lock_fs_check_clean(sdp); 749 if (!error) { 750 set_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags); 751 set_bit(SDF_FROZEN, &sdp->sd_flags); 752 break; 753 } 754 755 (void)gfs2_do_thaw(sdp, who, freeze_owner); 756 757 if (error == -EBUSY) 758 fs_err(sdp, "waiting for recovery before freeze\n"); 759 else if (error == -EIO) { 760 fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due " 761 "to recovery error.\n"); 762 goto out; 763 } else { 764 fs_err(sdp, "error freezing FS: %d\n", error); 765 } 766 fs_err(sdp, "retrying...\n"); 767 msleep(1000); 768 } 769 770 out: 771 mutex_unlock(&sdp->sd_freeze_mutex); 772 return error; 773 } 774 775 static int gfs2_freeze_fs(struct super_block *sb) 776 { 777 struct gfs2_sbd *sdp = sb->s_fs_info; 778 779 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { 780 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE | 781 GFS2_LFC_FREEZE_GO_SYNC); 782 if (gfs2_withdrawn(sdp)) 783 return -EIO; 784 } 785 return 0; 786 } 787 788 /** 789 * gfs2_thaw_super - reallow writes to the filesystem 790 * @sb: the VFS structure for the filesystem 791 * @who: freeze flags 792 * @freeze_owner: owner of the freeze 793 * 794 */ 795 796 static int gfs2_thaw_super(struct super_block 
*sb, enum freeze_holder who,
			    const void *freeze_owner)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	if (!mutex_trylock(&sdp->sd_freeze_mutex))
		return -EBUSY;
	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) {
		mutex_unlock(&sdp->sd_freeze_mutex);
		return -EINVAL;
	}

	atomic_inc(&sb->s_active);
	gfs2_freeze_unlock(sdp);

	error = gfs2_do_thaw(sdp, who, freeze_owner);

	if (!error) {
		clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags);
		clear_bit(SDF_FROZEN, &sdp->sd_flags);
	}
	mutex_unlock(&sdp->sd_freeze_mutex);
	deactivate_super(sb);
	return error;
}

/**
 * statfs_slow_fill - fill in the sc for a given RG
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 on success, -ESTALE if the LVB is invalid
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	spin_lock(&sdp->sd_statfs_spin);

	*sc = *m_sc;
	sc->sc_total += l_sc->sc_total;
	sc->sc_free += l_sc->sc_free;
	sc->sc_dinodes += l_sc->sc_dinodes;

	spin_unlock(&sdp->sd_statfs_spin);

	if (sc->sc_free < 0)
		sc->sc_free = 0;
	if (sc->sc_free > sc->sc_total)
		sc->sc_free = sc->sc_total;
	if (sc->sc_dinodes < 0)
sc->sc_dinodes = 0; 947 948 return 0; 949 } 950 951 /** 952 * gfs2_statfs - Gather and return stats about the filesystem 953 * @dentry: The name of the link 954 * @buf: The buffer 955 * 956 * Returns: 0 on success or error code 957 */ 958 959 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf) 960 { 961 struct super_block *sb = dentry->d_sb; 962 struct gfs2_sbd *sdp = sb->s_fs_info; 963 struct gfs2_statfs_change_host sc; 964 int error; 965 966 error = gfs2_rindex_update(sdp); 967 if (error) 968 return error; 969 970 if (gfs2_tune_get(sdp, gt_statfs_slow)) 971 error = gfs2_statfs_slow(sdp, &sc); 972 else 973 error = gfs2_statfs_i(sdp, &sc); 974 975 if (error) 976 return error; 977 978 buf->f_type = GFS2_MAGIC; 979 buf->f_bsize = sdp->sd_sb.sb_bsize; 980 buf->f_blocks = sc.sc_total; 981 buf->f_bfree = sc.sc_free; 982 buf->f_bavail = sc.sc_free; 983 buf->f_files = sc.sc_dinodes + sc.sc_free; 984 buf->f_ffree = sc.sc_free; 985 buf->f_namelen = GFS2_FNAMESIZE; 986 buf->f_fsid = uuid_to_fsid(sb->s_uuid.b); 987 988 return 0; 989 } 990 991 /** 992 * gfs2_drop_inode - Drop an inode (test for remote unlink) 993 * @inode: The inode to drop 994 * 995 * If we've received a callback on an iopen lock then it's because a 996 * remote node tried to deallocate the inode but failed due to this node 997 * still having the inode open. Here we mark the link count zero 998 * since we know that it must have reached zero if the GLF_DEMOTE flag 999 * is set on the iopen glock. If we didn't do a disk read since the 1000 * remote node removed the final link then we might otherwise miss 1001 * this event. This check ensures that this node will deallocate the 1002 * inode's blocks, or alternatively pass the baton on to another 1003 * node for later deallocation. 1004 */ 1005 1006 static int gfs2_drop_inode(struct inode *inode) 1007 { 1008 struct gfs2_inode *ip = GFS2_I(inode); 1009 struct gfs2_sbd *sdp = GFS2_SB(inode); 1010 1011 if (inode->i_nlink && 1012 gfs2_holder_initialized(&ip->i_iopen_gh)) { 1013 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; 1014 if (glock_needs_demote(gl)) 1015 clear_nlink(inode); 1016 } 1017 1018 /* 1019 * When under memory pressure when an inode's link count has dropped to 1020 * zero, defer deleting the inode to the delete workqueue. This avoids 1021 * calling into DLM under memory pressure, which can deadlock. 1022 */ 1023 if (!inode->i_nlink && 1024 unlikely(current->flags & PF_MEMALLOC) && 1025 gfs2_holder_initialized(&ip->i_iopen_gh)) { 1026 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; 1027 1028 gfs2_glock_hold(gl); 1029 if (!gfs2_queue_verify_delete(gl, true)) 1030 gfs2_glock_put_async(gl); 1031 return 0; 1032 } 1033 1034 /* 1035 * No longer cache inodes when trying to evict them all. 
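	 * Returning 1 makes the VFS evict the inode right away instead of
	 * keeping it cached once SDF_EVICTING is set (i.e. while the
	 * filesystem is being taken down).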
1036 */ 1037 if (test_bit(SDF_EVICTING, &sdp->sd_flags)) 1038 return 1; 1039 1040 return inode_generic_drop(inode); 1041 } 1042 1043 /** 1044 * gfs2_show_options - Show mount options for /proc/mounts 1045 * @s: seq_file structure 1046 * @root: root of this (sub)tree 1047 * 1048 * Returns: 0 on success or error code 1049 */ 1050 1051 static int gfs2_show_options(struct seq_file *s, struct dentry *root) 1052 { 1053 struct gfs2_sbd *sdp = root->d_sb->s_fs_info; 1054 struct gfs2_args *args = &sdp->sd_args; 1055 unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum; 1056 1057 spin_lock(&sdp->sd_tune.gt_spin); 1058 logd_secs = sdp->sd_tune.gt_logd_secs; 1059 quota_quantum = sdp->sd_tune.gt_quota_quantum; 1060 statfs_quantum = sdp->sd_tune.gt_statfs_quantum; 1061 statfs_slow = sdp->sd_tune.gt_statfs_slow; 1062 spin_unlock(&sdp->sd_tune.gt_spin); 1063 1064 if (is_subdir(root, sdp->sd_master_dir)) 1065 seq_puts(s, ",meta"); 1066 if (args->ar_lockproto[0]) 1067 seq_show_option(s, "lockproto", args->ar_lockproto); 1068 if (args->ar_locktable[0]) 1069 seq_show_option(s, "locktable", args->ar_locktable); 1070 if (args->ar_hostdata[0]) 1071 seq_show_option(s, "hostdata", args->ar_hostdata); 1072 if (args->ar_spectator) 1073 seq_puts(s, ",spectator"); 1074 if (args->ar_localflocks) 1075 seq_puts(s, ",localflocks"); 1076 if (args->ar_debug) 1077 seq_puts(s, ",debug"); 1078 if (args->ar_posix_acl) 1079 seq_puts(s, ",acl"); 1080 if (args->ar_quota != GFS2_QUOTA_DEFAULT) { 1081 char *state; 1082 switch (args->ar_quota) { 1083 case GFS2_QUOTA_OFF: 1084 state = "off"; 1085 break; 1086 case GFS2_QUOTA_ACCOUNT: 1087 state = "account"; 1088 break; 1089 case GFS2_QUOTA_ON: 1090 state = "on"; 1091 break; 1092 case GFS2_QUOTA_QUIET: 1093 state = "quiet"; 1094 break; 1095 default: 1096 state = "unknown"; 1097 break; 1098 } 1099 seq_printf(s, ",quota=%s", state); 1100 } 1101 if (args->ar_suiddir) 1102 seq_puts(s, ",suiddir"); 1103 if (args->ar_data != GFS2_DATA_DEFAULT) { 1104 char *state; 1105 switch (args->ar_data) { 1106 case GFS2_DATA_WRITEBACK: 1107 state = "writeback"; 1108 break; 1109 case GFS2_DATA_ORDERED: 1110 state = "ordered"; 1111 break; 1112 default: 1113 state = "unknown"; 1114 break; 1115 } 1116 seq_printf(s, ",data=%s", state); 1117 } 1118 if (args->ar_discard) 1119 seq_puts(s, ",discard"); 1120 if (logd_secs != 30) 1121 seq_printf(s, ",commit=%d", logd_secs); 1122 if (statfs_quantum != 30) 1123 seq_printf(s, ",statfs_quantum=%d", statfs_quantum); 1124 else if (statfs_slow) 1125 seq_puts(s, ",statfs_quantum=0"); 1126 if (quota_quantum != 60) 1127 seq_printf(s, ",quota_quantum=%d", quota_quantum); 1128 if (args->ar_statfs_percent) 1129 seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); 1130 if (args->ar_errors != GFS2_ERRORS_DEFAULT) { 1131 const char *state; 1132 1133 switch (args->ar_errors) { 1134 case GFS2_ERRORS_WITHDRAW: 1135 state = "withdraw"; 1136 break; 1137 case GFS2_ERRORS_DEACTIVATE: 1138 state = "deactivate"; 1139 break; 1140 case GFS2_ERRORS_PANIC: 1141 state = "panic"; 1142 break; 1143 default: 1144 state = "unknown"; 1145 break; 1146 } 1147 seq_printf(s, ",errors=%s", state); 1148 } 1149 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) 1150 seq_puts(s, ",nobarrier"); 1151 if (test_bit(SDF_DEMOTE, &sdp->sd_flags)) 1152 seq_puts(s, ",demote_interface_used"); 1153 if (args->ar_rgrplvb) 1154 seq_puts(s, ",rgrplvb"); 1155 if (args->ar_loccookie) 1156 seq_puts(s, ",loccookie"); 1157 return 0; 1158 } 1159 1160 /** 1161 * gfs2_glock_put_eventually 1162 * @gl: The glock to 
put 1163 * 1164 * When under memory pressure, trigger a deferred glock put to make sure we 1165 * won't call into DLM and deadlock. Otherwise, put the glock directly. 1166 */ 1167 1168 static void gfs2_glock_put_eventually(struct gfs2_glock *gl) 1169 { 1170 if (current->flags & PF_MEMALLOC) 1171 gfs2_glock_put_async(gl); 1172 else 1173 gfs2_glock_put(gl); 1174 } 1175 1176 static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode) 1177 { 1178 struct gfs2_inode *ip = GFS2_I(inode); 1179 struct gfs2_sbd *sdp = GFS2_SB(inode); 1180 struct gfs2_holder *gh = &ip->i_iopen_gh; 1181 int error; 1182 1183 gh->gh_flags |= GL_NOCACHE; 1184 gfs2_glock_dq_wait(gh); 1185 1186 /* 1187 * If there are no other lock holders, we will immediately get 1188 * exclusive access to the iopen glock here. 1189 * 1190 * Otherwise, the other nodes holding the lock will be notified about 1191 * our locking request (see iopen_go_callback()). If they do not have 1192 * the inode open, they are expected to evict the cached inode and 1193 * release the lock, allowing us to proceed. 1194 * 1195 * Otherwise, if they cannot evict the inode, they are expected to poke 1196 * the inode glock (note: not the iopen glock). We will notice that 1197 * and stop waiting for the iopen glock immediately. The other node(s) 1198 * are then expected to take care of deleting the inode when they no 1199 * longer use it. 1200 * 1201 * As a last resort, if another node keeps holding the iopen glock 1202 * without showing any activity on the inode glock, we will eventually 1203 * time out and fail the iopen glock upgrade. 1204 */ 1205 1206 gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh); 1207 error = gfs2_glock_nq(gh); 1208 if (error) 1209 return EVICT_SHOULD_SKIP_DELETE; 1210 1211 wait_event_interruptible_timeout(sdp->sd_async_glock_wait, 1212 !test_bit(HIF_WAIT, &gh->gh_iflags) || 1213 glock_needs_demote(ip->i_gl), 1214 5 * HZ); 1215 if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) { 1216 gfs2_glock_dq(gh); 1217 if (glock_needs_demote(ip->i_gl)) 1218 return EVICT_SHOULD_SKIP_DELETE; 1219 return EVICT_SHOULD_DEFER_DELETE; 1220 } 1221 error = gfs2_glock_holder_ready(gh); 1222 if (error) 1223 return EVICT_SHOULD_SKIP_DELETE; 1224 return EVICT_SHOULD_DELETE; 1225 } 1226 1227 /** 1228 * evict_should_delete - determine whether the inode is eligible for deletion 1229 * @inode: The inode to evict 1230 * @gh: The glock holder structure 1231 * 1232 * This function determines whether the evicted inode is eligible to be deleted 1233 * and locks the inode glock. 1234 * 1235 * Returns: the fate of the dinode 1236 */ 1237 static enum evict_behavior evict_should_delete(struct inode *inode, 1238 struct gfs2_holder *gh) 1239 { 1240 struct gfs2_inode *ip = GFS2_I(inode); 1241 struct super_block *sb = inode->i_sb; 1242 struct gfs2_sbd *sdp = sb->s_fs_info; 1243 int ret; 1244 1245 if (inode->i_nlink) 1246 return EVICT_SHOULD_SKIP_DELETE; 1247 1248 if (gfs2_holder_initialized(&ip->i_iopen_gh) && 1249 test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags)) 1250 return EVICT_SHOULD_DEFER_DELETE; 1251 1252 /* Deletes should never happen under memory pressure anymore. 
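	 * gfs2_drop_inode() diverts those inodes to the delete workqueue
	 * instead, so hitting this with PF_MEMALLOC set would indicate a bug
	 * (hence the warning below); deferring the delete avoids calling
	 * into DLM from reclaim context.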
*/ 1253 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) 1254 return EVICT_SHOULD_DEFER_DELETE; 1255 1256 /* Must not read inode block until block type has been verified */ 1257 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh); 1258 if (unlikely(ret)) 1259 return EVICT_SHOULD_SKIP_DELETE; 1260 1261 if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino)) 1262 return EVICT_SHOULD_SKIP_DELETE; 1263 ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); 1264 if (ret) 1265 return EVICT_SHOULD_SKIP_DELETE; 1266 1267 ret = gfs2_instantiate(gh); 1268 if (ret) 1269 return EVICT_SHOULD_SKIP_DELETE; 1270 1271 /* 1272 * The inode may have been recreated in the meantime. 1273 */ 1274 if (inode->i_nlink) 1275 return EVICT_SHOULD_SKIP_DELETE; 1276 1277 if (gfs2_holder_initialized(&ip->i_iopen_gh) && 1278 test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) 1279 return gfs2_upgrade_iopen_glock(inode); 1280 return EVICT_SHOULD_DELETE; 1281 } 1282 1283 /** 1284 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode 1285 * @inode: The inode to evict 1286 * @gh: The glock holder structure 1287 */ 1288 static int evict_unlinked_inode(struct inode *inode, struct gfs2_holder *gh) 1289 { 1290 struct gfs2_inode *ip = GFS2_I(inode); 1291 struct gfs2_glock *gl = ip->i_gl; 1292 int ret; 1293 1294 /* The inode glock must be held exclusively and be instantiated. */ 1295 BUG_ON(!gfs2_holder_initialized(gh) || 1296 test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)); 1297 1298 if (S_ISDIR(inode->i_mode) && 1299 (ip->i_diskflags & GFS2_DIF_EXHASH)) { 1300 ret = gfs2_dir_exhash_dealloc(ip); 1301 if (ret) 1302 goto out; 1303 } 1304 1305 if (ip->i_eattr) { 1306 ret = gfs2_ea_dealloc(ip, true); 1307 if (ret) 1308 goto out; 1309 } 1310 1311 if (!gfs2_is_stuffed(ip)) { 1312 ret = gfs2_file_dealloc(ip); 1313 if (ret) 1314 goto out; 1315 } 1316 1317 /* 1318 * As soon as we clear the bitmap for the dinode, gfs2_create_inode() 1319 * can get called to recreate it, or even gfs2_inode_lookup() if the 1320 * inode was recreated on another node in the meantime. 1321 * 1322 * However, inserting the new inode into the inode hash table will not 1323 * succeed until the old inode is removed, and that only happens after 1324 * ->evict_inode() returns. The new inode is attached to its inode and 1325 * iopen glocks after inserting it into the inode hash table, so at 1326 * that point we can be sure that both glocks are unused. 1327 */ 1328 1329 ret = gfs2_dinode_dealloc(ip); 1330 if (!ret) 1331 gfs2_inode_remember_delete(gl, ip->i_no_formal_ino); 1332 1333 out: 1334 return ret; 1335 } 1336 1337 static int gfs2_truncate_inode_pages(struct inode *inode) 1338 { 1339 struct gfs2_inode *ip = GFS2_I(inode); 1340 struct gfs2_sbd *sdp = GFS2_SB(inode); 1341 struct address_space *mapping = &inode->i_data; 1342 bool need_trans = gfs2_is_jdata(ip) && mapping->nrpages; 1343 int ret = 0; 1344 1345 /* 1346 * Truncating a jdata inode address space may create revokes in 1347 * truncate_inode_pages() -> gfs2_invalidate_folio() -> ... -> 1348 * gfs2_remove_from_journal(), so we need a transaction here. 1349 * 1350 * During a withdraw, no new transactions can be created. We still 1351 * take the log flush lock to prevent truncate from racing with 1352 * gfs2_log_flush(). 
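	 *
	 * That is also why a failed gfs2_trans_begin() below is not treated
	 * as fatal: in that case we fall back to holding sd_log_flush_lock
	 * across the truncate instead of running it inside a transaction.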
1353 */ 1354 if (need_trans) { 1355 ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); 1356 if (ret) 1357 down_read(&sdp->sd_log_flush_lock); 1358 } 1359 truncate_inode_pages(mapping, 0); 1360 if (need_trans) { 1361 if (ret) 1362 up_read(&sdp->sd_log_flush_lock); 1363 else 1364 gfs2_trans_end(sdp); 1365 } 1366 return ret; 1367 } 1368 1369 static void gfs2_truncate_inode_pages_final(struct inode *inode) 1370 { 1371 struct gfs2_inode *ip = GFS2_I(inode); 1372 struct gfs2_sbd *sdp = GFS2_SB(inode); 1373 struct address_space *mapping = &inode->i_data; 1374 bool need_lock = gfs2_is_jdata(ip) && mapping->nrpages; 1375 1376 if (need_lock) 1377 down_read(&sdp->sd_log_flush_lock); 1378 truncate_inode_pages_final(mapping); 1379 if (need_lock) 1380 up_read(&sdp->sd_log_flush_lock); 1381 } 1382 1383 /* 1384 * evict_linked_inode - evict an inode whose dinode has not been unlinked 1385 * @inode: The inode to evict 1386 * @gh: The glock holder structure 1387 */ 1388 static int evict_linked_inode(struct inode *inode, struct gfs2_holder *gh) 1389 { 1390 struct super_block *sb = inode->i_sb; 1391 struct gfs2_sbd *sdp = sb->s_fs_info; 1392 struct gfs2_inode *ip = GFS2_I(inode); 1393 struct gfs2_glock *gl = ip->i_gl; 1394 struct address_space *metamapping = gfs2_glock2aspace(gl); 1395 int ret; 1396 1397 if (!(test_bit(GLF_DIRTY, &gl->gl_flags) || inode->i_flags & I_DIRTY)) 1398 goto clean; 1399 1400 /* The inode glock must be held exclusively and be instantiated. */ 1401 if (!gfs2_holder_initialized(gh)) 1402 ret = gfs2_glock_nq_init(gl, LM_ST_EXCLUSIVE, 0, gh); 1403 else 1404 ret = gfs2_instantiate(gh); 1405 if (ret) 1406 return ret; 1407 1408 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | 1409 GFS2_LFC_EVICT_INODE); 1410 if (test_bit(GLF_DIRTY, &gl->gl_flags)) { 1411 filemap_fdatawrite(metamapping); 1412 filemap_fdatawait(metamapping); 1413 } 1414 write_inode_now(inode, 1); 1415 gfs2_ail_flush(gl, 0); 1416 1417 clean: 1418 ret = gfs2_truncate_inode_pages(inode); 1419 truncate_inode_pages(metamapping, 0); 1420 return ret; 1421 } 1422 1423 /** 1424 * gfs2_evict_inode - Remove an inode from cache 1425 * @inode: The inode to evict 1426 * 1427 * There are three cases to consider: 1428 * 1. i_nlink == 0, we are final opener (and must deallocate) 1429 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate) 1430 * 3. i_nlink > 0 1431 * 1432 * If the fs is read only, then we have to treat all cases as per #3 1433 * since we are unable to do any deallocation. The inode will be 1434 * deallocated by the next read/write node to attempt an allocation 1435 * in the same resource group 1436 * 1437 * We have to (at the moment) hold the inodes main lock to cover 1438 * the gap between unlocking the shared lock on the iopen lock and 1439 * taking the exclusive lock. I'd rather do a shared -> exclusive 1440 * conversion on the iopen lock, but we can change that later. This 1441 * is safe, just less efficient. 1442 */ 1443 1444 static void gfs2_evict_inode(struct inode *inode) 1445 { 1446 struct super_block *sb = inode->i_sb; 1447 struct gfs2_sbd *sdp = sb->s_fs_info; 1448 struct gfs2_inode *ip = GFS2_I(inode); 1449 struct gfs2_holder gh; 1450 enum evict_behavior behavior; 1451 int ret; 1452 1453 gfs2_holder_mark_uninitialized(&gh); 1454 if (sb_rdonly(sb) || !ip->i_no_addr || !ip->i_gl) 1455 goto out; 1456 1457 /* 1458 * In case of an incomplete mount, gfs2_evict_inode() may be called for 1459 * system files without having an active journal to write to. 
In that 1460 * case, skip the filesystem evict. 1461 */ 1462 if (!sdp->sd_jdesc) 1463 goto out; 1464 1465 behavior = evict_should_delete(inode, &gh); 1466 if (behavior == EVICT_SHOULD_DEFER_DELETE && 1467 !test_bit(SDF_KILL, &sdp->sd_flags)) { 1468 struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl; 1469 1470 if (io_gl) { 1471 gfs2_glock_hold(io_gl); 1472 if (!gfs2_queue_verify_delete(io_gl, true)) 1473 gfs2_glock_put(io_gl); 1474 goto out; 1475 } 1476 behavior = EVICT_SHOULD_SKIP_DELETE; 1477 } 1478 if (behavior == EVICT_SHOULD_DELETE) 1479 ret = evict_unlinked_inode(inode, &gh); 1480 else 1481 ret = evict_linked_inode(inode, &gh); 1482 1483 if (gfs2_rs_active(&ip->i_res)) 1484 gfs2_rs_deltree(&ip->i_res); 1485 1486 if (ret && !gfs2_withdrawn(sdp) && ret != -EROFS) 1487 fs_warn(sdp, "gfs2_evict_inode: %d\n", ret); 1488 out: 1489 if (gfs2_holder_initialized(&gh)) 1490 gfs2_glock_dq_uninit(&gh); 1491 gfs2_truncate_inode_pages_final(inode); 1492 if (ip->i_qadata) 1493 gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0); 1494 gfs2_rs_deltree(&ip->i_res); 1495 gfs2_ordered_del_inode(ip); 1496 clear_inode(inode); 1497 gfs2_dir_hash_inval(ip); 1498 if (gfs2_holder_initialized(&ip->i_iopen_gh)) { 1499 struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; 1500 1501 glock_clear_object(gl, ip); 1502 gfs2_glock_hold(gl); 1503 ip->i_iopen_gh.gh_flags |= GL_NOCACHE; 1504 gfs2_glock_dq_uninit(&ip->i_iopen_gh); 1505 gfs2_glock_put_eventually(gl); 1506 } 1507 if (ip->i_gl) { 1508 glock_clear_object(ip->i_gl, ip); 1509 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE); 1510 gfs2_glock_put_eventually(ip->i_gl); 1511 rcu_assign_pointer(ip->i_gl, NULL); 1512 } 1513 } 1514 1515 static struct inode *gfs2_alloc_inode(struct super_block *sb) 1516 { 1517 struct gfs2_inode *ip; 1518 1519 ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL); 1520 if (!ip) 1521 return NULL; 1522 ip->i_no_addr = 0; 1523 ip->i_no_formal_ino = 0; 1524 ip->i_flags = 0; 1525 ip->i_gl = NULL; 1526 gfs2_holder_mark_uninitialized(&ip->i_iopen_gh); 1527 memset(&ip->i_res, 0, sizeof(ip->i_res)); 1528 RB_CLEAR_NODE(&ip->i_res.rs_node); 1529 ip->i_diskflags = 0; 1530 ip->i_rahead = 0; 1531 return &ip->i_inode; 1532 } 1533 1534 static void gfs2_free_inode(struct inode *inode) 1535 { 1536 kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode)); 1537 } 1538 1539 void free_local_statfs_inodes(struct gfs2_sbd *sdp) 1540 { 1541 struct local_statfs_inode *lsi, *safe; 1542 1543 /* Run through the statfs inodes list to iput and free memory */ 1544 list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) { 1545 if (lsi->si_jid == sdp->sd_jdesc->jd_jid) 1546 sdp->sd_sc_inode = NULL; /* belongs to this node */ 1547 if (lsi->si_sc_inode) 1548 iput(lsi->si_sc_inode); 1549 list_del(&lsi->si_list); 1550 kfree(lsi); 1551 } 1552 } 1553 1554 struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp, 1555 unsigned int index) 1556 { 1557 struct local_statfs_inode *lsi; 1558 1559 /* Return the local (per node) statfs inode in the 1560 * sdp->sd_sc_inodes_list corresponding to the 'index'. 
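	 * The index is the journal id (si_jid) that the inode belongs to;
	 * NULL is returned when no matching local statfs inode exists.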
	 */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze_super,
	.freeze_fs		= gfs2_freeze_fs,
	.thaw_super		= gfs2_thaw_super,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};
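
/*
 * Note: this table is hooked up as sb->s_op when the superblock is set up
 * during mount (see gfs2_fill_super() in ops_fstype.c), which is how the
 * VFS reaches the callbacks defined above.
 */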