/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	/* ip may be NULL; check it before touching the inode */
	ip = gl->gl_object;
	if (!ip || !S_ISREG(ip->i_inode.i_mode))
		return;
	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
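 * Otherwise another node could be granted the lock in the meantime and
 * read stale data from disk.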
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags (only DIO_METADATA is acted upon)
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip)
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			if (error == -ENOSPC)
				set_bit(AS_ENOSPC, &mapping->flags);
			else if (error)
				set_bit(AS_EIO, &mapping->flags);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().  Either another node needs the lock in
 * EXCLUSIVE mode, or the lock has sat unused for too long and is being
 * purged from this node's glock cache; either way, we are dropping it.
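 * Any mapped PTEs are invalidated first; if the lock was held in EX,
 * dirty data and metadata are synced out before the lock goes away.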
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags (only DIO_METADATA is acted upon)
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
				     !ip->i_inode.i_mapping->nrpages);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gh: the holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 * a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 * a last holder on this node.
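 * Releases the rgrp buffers obtained by rgrp_go_lock().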
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(ip);
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log head from the recovered journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
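 * The sync below ensures those dirty buffers actually reach the device
 * before the journal is shut down.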
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};