/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/bio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		gfs2_remove_from_ail(bd);
		bd->bd_bh = NULL;
		bh->b_private = NULL;
		bd->bd_blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));
		gfs2_trans_add_revoke(sdp, bd);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	if (!ip)
		return;

	inode = &ip->i_inode;
	if (!S_ISREG(inode->i_mode))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);
}
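
/*
 * GIF_SW_PAGED is set by the page fault path when a page is made
 * writable through a shared mapping, so dirty data may exist that
 * never passed through write(). Flagging the glock GLF_DIRTY here
 * ensures the sync callbacks below flush that data before the lock
 * is demoted or dropped.
 */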

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must
 * not return to the caller to demote/unlock the glock until the
 * I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	if (gl->gl_object == GFS2_I(gl->gl_sbd->sd_rindex))
		gl->gl_sbd->sd_rindex_uptodate = 0;
	else if (gl->gl_ops == &gfs2_rgrp_glops && gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;

		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gl->gl_aspace->i_mapping;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		filemap_fdatawrite(metamapping);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			filemap_fdatawrite(mapping);
			error = filemap_fdatawait(mapping);
			mapping_set_error(mapping, error);
		}
		error = filemap_fdatawait(metamapping);
		mapping_set_error(metamapping, error);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: DIO_* flags describing what to invalidate
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;
	return 1;
}
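
/*
 * The jindex and rindex inodes above are special cases: they are
 * needed for almost every journal and resource group operation, so
 * (presumably to avoid constantly re-reading them) their glocks are
 * never volunteered for demotion while the filesystem is mounted.
 */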

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)ip->i_inode.i_size,
		       (unsigned long long)ip->i_disksize);
	return 0;
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_dump - print out an rgrp
 * @seq: The iterator
 * @gl: The glock in question
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_rgrpd *rgd = gl->gl_object;
	if (rgd == NULL)
		return 0;
	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n",
		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes);
	return 0;
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
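
/*
 * The transaction glock is used, among other things, to freeze the
 * filesystem: trans_go_sync() quiesces and shuts down this node's log
 * before the glock is demoted, and trans_go_xmote_bh() verifies on
 * reacquisition that the journal head carries a clean unmount header
 * before re-initializing the log pointers.
 */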

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(const struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_min_hold_time = HZ / 5,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_dump = rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_min_hold_time = HZ / 5,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};
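
/*
 * Each table above supplies the type-specific callbacks for one class
 * of glock. A sketch of how they are consumed (illustrative only):
 * callers pass the table to gfs2_glock_get() when looking up or
 * creating a glock, e.g.
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops,
 *				   CREATE, &gl);
 */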