xref: /linux/fs/gfs2/meta_io.c (revision f3449bf31d352f70c80a7993c272a7854ae98086)
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

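/**
 * gfs2_aspace_writepage - writepage for the per-glock metadata address space
 * @page: the page of metadata buffers to write
 * @wbc: the writeback control
 *
 * Closely follows __block_write_full_page(): dirty, mapped buffers on the
 * page are locked, marked for async write and submitted; if nothing was
 * submitted, writeback on the page is ended immediately.
 */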
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh, *head;
	int nr_underway = 0;
	int write_op = REQ_META |
		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);

	BUG_ON(!PageLocked(page));
	BUG_ON(!page_has_buffers(page));

	head = page_buffers(page);
	bh = head;

	do {
		if (!buffer_mapped(bh))
			continue;
		/*
		 * If it's a fully non-blocking write attempt and we cannot
		 * lock the buffer then redirty the page.  Note that this can
		 * potentially cause a busy-wait loop from pdflush and kswapd
		 * activity, but those code paths have their own higher-level
		 * throttling.
		 */
		if (wbc->sync_mode != WB_SYNC_NONE) {
			lock_buffer(bh);
		} else if (!trylock_buffer(bh)) {
			redirty_page_for_writepage(wbc, page);
			continue;
		}
		if (test_clear_buffer_dirty(bh)) {
			mark_buffer_async_write(bh);
		} else {
			unlock_buffer(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	/*
	 * The page and its buffers are protected by PageWriteback(), so we can
	 * drop the bh refcounts early.
	 */
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(write_op, bh);
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
}

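/*
 * Address space operations for the per-glock metadata mapping (see
 * gfs2_glock2aspace()), which is how GFS2 keeps metadata pages separate
 * from ordinary file data.
 */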
const struct address_space_operations gfs2_meta_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
	.sync_page = block_sync_page,
};

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}

/**
 * gfs2_getbuf - Get a buffer from a glock's metadata address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created when not already present
 *
 * Returns: the buffer, or NULL if @create is 0 and the page isn't cached
 */

struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
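		/*
		 * When creating, retry until the page is available;
		 * allocation failures just yield and try again instead
		 * of being returned to the caller.
		 */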
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}

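/**
 * meta_prep_new - initialise a freshly allocated metadata buffer
 * @bh: the buffer to prepare
 *
 * Marks the buffer clean and uptodate and stamps the GFS2 magic number
 * into the metadata header at the start of the block.
 */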
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}

/**
 * gfs2_meta_new - Get a buffer for a new metadata block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */

struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = gfs2_getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}

/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: DIO_WAIT if the caller should wait for the read to complete
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */

int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *bh;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);

	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(READ_SYNC | REQ_META, bh);
	if (!(flags & DIO_WAIT))
		return 0;

	wait_on_buffer(bh);
	if (unlikely(!buffer_uptodate(bh))) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		brelse(bh);
		return -EIO;
	}

	return 0;
}

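/*
 * Illustrative call pattern only (not part of this file): assuming the
 * caller already holds a glock @gl covering block @blkno, a synchronous
 * metadata read and release typically looks like:
 *
 *	struct buffer_head *bh;
 *	int error;
 *
 *	error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
 *	if (error)
 *		return error;
 *	... use bh->b_data ...
 *	brelse(bh);
 */
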
/**
 * gfs2_meta_wait - Wait for a previously submitted metadata read to complete
 * @sdp: the filesystem
 * @bh: The buffer to wait for
 *
 * Returns: errno
 */

int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}

/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it is metadata or not
 */

void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
			 int meta)
{
	struct gfs2_bufdata *bd;

	if (meta)
		lock_page(bh->b_page);

	if (bh->b_private) {
		if (meta)
			unlock_page(bh->b_page);
		return;
	}

	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
	bd->bd_bh = bh;
	bd->bd_gl = gl;

	INIT_LIST_HEAD(&bd->bd_list_tr);
	if (meta)
		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
	else
		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
	bh->b_private = bd;

	if (meta)
		unlock_page(bh->b_page);
}

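/**
 * gfs2_remove_from_journal - remove a buffer from the journal's accounting
 * @bh: the buffer to remove
 * @tr: the current transaction
 * @meta: non-zero if this is a metadata buffer rather than a data buffer
 *
 * Unpins the buffer if it was pinned and drops it from the corresponding
 * log list; if it is still on an AIL list, it is detached and a revoke is
 * queued so the block will not be replayed. Finally the buffer is marked
 * neither dirty nor uptodate.
 */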
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct gfs2_bufdata *bd = bh->b_private;

	if (test_clear_buffer_pinned(bh)) {
		atomic_dec(&sdp->sd_log_pinned);
		list_del_init(&bd->bd_le.le_list);
		if (meta) {
			gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
			sdp->sd_log_num_buf--;
			tr->tr_num_buf_rm++;
		} else {
			gfs2_assert_warn(sdp, sdp->sd_log_num_databuf);
			sdp->sd_log_num_databuf--;
			tr->tr_num_databuf_rm++;
		}
		tr->tr_touched = 1;
		brelse(bh);
	}
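	/*
	 * If the buffer is still tracked on an AIL list, detach it and
	 * queue a revoke so journal recovery will not replay this block.
	 */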
	if (bd) {
		if (bd->bd_ail) {
			gfs2_remove_from_ail(bd);
			bh->b_private = NULL;
			bd->bd_bh = NULL;
			bd->bd_blkno = bh->b_blocknr;
			gfs2_trans_add_revoke(sdp, bd);
		}
	}
	clear_buffer_dirty(bh);
	clear_buffer_uptodate(bh);
}

/**
 * gfs2_meta_wipe - make sure a run of an inode's buffers is no longer dirty or pinned
 * @ip: the inode that owns the buffers
 * @bstart: the first block in the run
 * @blen: the number of blocks in the run
 *
 */

void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			lock_buffer(bh);
			gfs2_log_lock(sdp);
			gfs2_remove_from_journal(bh, current->journal_info, 1);
			gfs2_log_unlock(sdp);
			unlock_buffer(bh);
			brelse(bh);
		}

		bstart++;
		blen--;
	}
}

/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Returns: errno
 */

int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      int new, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh;
	int ret = 0;

	if (new) {
		BUG_ON(height == 0);
		bh = gfs2_meta_new(gl, num);
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else {
		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
		ret = gfs2_meta_read(gl, num, DIO_WAIT, &bh);
		if (ret == 0 && gfs2_metatype_check(sdp, bh, mtype)) {
			brelse(bh);
			ret = -EIO;
		}
	}
	*bhp = bh;
	return ret;
}

/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * Returns: the first buffer in the extent
 */

struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
			  sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = gfs2_getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);

	dblock++;
	extlen--;

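	/*
	 * Best-effort readahead for the rest of the extent: each buffer is
	 * released immediately, and we stop early once the first buffer
	 * has completed.
	 */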
	while (extlen) {
		bh = gfs2_getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(READA, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}