xref: /linux/fs/ocfs2/buffer_head_io.c (revision 93d546399c2b7d66a54d5fbd5eee17de19246bf6)
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * io.c
 *
 * Buffer cache handling
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "inode.h"
#include "journal.h"
#include "uptodate.h"

#include "buffer_head_io.h"

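/*
 * ocfs2_write_block(): synchronously write a single, non-journalled
 * buffer.  The write is serialized against other block I/O on the inode
 * via ip_io_mutex and is refused with -EROFS on a hard read-only mount.
 * On success the buffer is also marked uptodate in the inode's cluster
 * uptodate cache; on I/O error -EIO is returned.
 */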
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft read-only file system here, as
	 * non-journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}

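/*
 * ocfs2_read_blocks_sync(): read nr blocks starting at 'block' straight
 * from disk, bypassing the per-inode uptodate cache and ip_io_mutex.
 * NULL slots in bhs[] are allocated with sb_getblk(); buffers already
 * managed by JBD or still dirty are skipped rather than re-read.  On I/O
 * failure the affected buffer is released and its slot is set back to
 * NULL while the remaining buffers are still waited on.
 *
 * Illustrative call (the local variables here are hypothetical, not part
 * of this file):
 *
 *	struct buffer_head *bh = NULL;
 *	status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
 *	if (status < 0)
 *		mlog_errno(status);
 *	else
 *		brelse(bh);
 */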
int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
			   unsigned int nr, struct buffer_head *bhs[])
{
	int status = 0;
	unsigned int i;
	struct buffer_head *bh;

	if (!nr) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		goto bail;
	}

	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(osb->sb, block++);
			if (bhs[i] == NULL) {
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];

		if (buffer_jbd(bh)) {
			mlog(ML_BH_IO,
			     "trying to sync read a jbd "
			     "managed bh (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (buffer_dirty(bh)) {
			/* This should probably be a BUG, or
			 * at least return an error. */
			mlog(ML_ERROR,
			     "trying to sync read a dirty "
			     "buffer! (blocknr = %llu), skipping\n",
			     (unsigned long long)bh->b_blocknr);
			continue;
		}

		lock_buffer(bh);
		if (buffer_jbd(bh)) {
			mlog(ML_ERROR,
			     "block %llu had the JBD bit set "
			     "while I was in lock_buffer!",
			     (unsigned long long)bh->b_blocknr);
			BUG();
		}

		clear_buffer_uptodate(bh);
		get_bh(bh); /* for end_buffer_read_sync() */
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
	}

	for (i = nr; i > 0; i--) {
		bh = bhs[i - 1];

		/* No need to wait on the buffer if it's managed by JBD. */
		if (!buffer_jbd(bh))
			wait_on_buffer(bh);

		if (!buffer_uptodate(bh)) {
			/* Status won't be cleared from here on out,
			 * so we can safely record this and loop back
			 * to clean up the other buffers. */
			status = -EIO;
			put_bh(bh);
			bhs[i - 1] = NULL;
		}
	}

bail:
	return status;
}

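/*
 * ocfs2_read_blocks(): cached read of nr blocks for 'inode', serialized
 * by ip_io_mutex.  NULL slots in bhs[] are allocated with sb_getblk().
 * With no flags set, a buffer that is already in the inode's cluster
 * uptodate cache is returned without touching the disk.
 * OCFS2_BH_IGNORE_CACHE forces the read from disk, and
 * OCFS2_BH_READAHEAD submits the read asynchronously without waiting
 * for completion (the two flags are mutually exclusive).
 *
 * Illustrative cached read of one block (hypothetical local variables,
 * not part of this file):
 *
 *	struct buffer_head *bh = NULL;
 *	status = ocfs2_read_blocks(inode, blkno, 1, &bh, 0);
 *	if (status < 0)
 *		mlog_errno(status);
 *	else
 *		brelse(bh);
 */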
int ocfs2_read_blocks(struct inode *inode, u64 block, int nr,
		      struct buffer_head *bhs[], int flags)
{
	int status = 0;
	int i, ignore_cache = 0;
	struct buffer_head *bh;

	mlog_entry("(inode=%p, block=(%llu), nr=(%d), flags=%d)\n",
		   inode, (unsigned long long)block, nr, flags);

	BUG_ON(!inode);
	BUG_ON((flags & OCFS2_BH_READAHEAD) &&
	       (flags & OCFS2_BH_IGNORE_CACHE));

	if (bhs == NULL) {
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr < 0) {
		mlog(ML_ERROR, "asked to read %d blocks!\n", nr);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	if (nr == 0) {
		mlog(ML_BH_IO, "No buffers will be read!\n");
		status = 0;
		goto bail;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
	for (i = 0 ; i < nr ; i++) {
		if (bhs[i] == NULL) {
			bhs[i] = sb_getblk(inode->i_sb, block++);
			if (bhs[i] == NULL) {
				mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
				status = -EIO;
				mlog_errno(status);
				goto bail;
			}
		}
		bh = bhs[i];
		ignore_cache = (flags & OCFS2_BH_IGNORE_CACHE);

		/* There are three read-ahead cases here which we need to
		 * be concerned with. All three assume a buffer has
		 * previously been submitted with OCFS2_BH_READAHEAD
		 * and it hasn't yet completed I/O.
		 *
		 * 1) The current request is sync to disk. This rarely
		 *    happens these days, and never when performance
		 *    matters - the code can just wait on the buffer
		 *    lock and re-submit.
		 *
		 * 2) The current request is cached, but not
		 *    readahead. ocfs2_buffer_uptodate() will return
		 *    false anyway, so we'll wind up waiting on the
		 *    buffer lock to do I/O. We re-check the request
		 *    after getting the lock to avoid a re-submit.
		 *
		 * 3) The current request is readahead (and so must
		 *    also be a caching one). We short circuit if the
		 *    buffer is locked (under I/O) and if it's in the
		 *    uptodate cache. The re-check from #2 catches the
		 *    case that the previous read-ahead completes just
		 *    before our is-it-in-flight check.
		 */

		if (!ignore_cache && !ocfs2_buffer_uptodate(inode, bh)) {
			mlog(ML_UPTODATE,
			     "bh (%llu), inode %llu not uptodate\n",
			     (unsigned long long)bh->b_blocknr,
			     (unsigned long long)OCFS2_I(inode)->ip_blkno);
			/* We're using ignore_cache here to say
			 * "go to disk" */
			ignore_cache = 1;
		}

		if (buffer_jbd(bh)) {
			if (ignore_cache)
				mlog(ML_BH_IO, "trying to sync read a jbd "
					       "managed bh (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
			continue;
		}

		if (ignore_cache) {
			if (buffer_dirty(bh)) {
				/* This should probably be a BUG, or
				 * at least return an error. */
				mlog(ML_BH_IO, "asking me to sync read a dirty "
					       "buffer! (blocknr = %llu)\n",
				     (unsigned long long)bh->b_blocknr);
				continue;
			}

			/* A read-ahead request was made - if the
			 * buffer is already under read-ahead from a
			 * previously submitted request then we are
			 * done here. */
			if ((flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_read_ahead(inode, bh))
				continue;

			lock_buffer(bh);
			if (buffer_jbd(bh)) {
#ifdef CATCH_BH_JBD_RACES
				mlog(ML_ERROR, "block %llu had the JBD bit set "
					       "while I was in lock_buffer!",
				     (unsigned long long)bh->b_blocknr);
				BUG();
#else
				unlock_buffer(bh);
				continue;
#endif
			}

			/* Re-check ocfs2_buffer_uptodate() as a
			 * previously read-ahead buffer may have
			 * completed I/O while we were waiting for the
			 * buffer lock. */
			if (!(flags & OCFS2_BH_IGNORE_CACHE)
			    && !(flags & OCFS2_BH_READAHEAD)
			    && ocfs2_buffer_uptodate(inode, bh)) {
				unlock_buffer(bh);
				continue;
			}

			clear_buffer_uptodate(bh);
			get_bh(bh); /* for end_buffer_read_sync() */
			bh->b_end_io = end_buffer_read_sync;
			submit_bh(READ, bh);
			continue;
		}
	}

	status = 0;

	for (i = (nr - 1); i >= 0; i--) {
		bh = bhs[i];

		if (!(flags & OCFS2_BH_READAHEAD)) {
			/* We know this can't have changed as we hold
			 * ip_io_mutex. Avoid doing any work on the bh if the
			 * journal has it. */
			if (!buffer_jbd(bh))
				wait_on_buffer(bh);

			if (!buffer_uptodate(bh)) {
				/* Status won't be cleared from here on out,
				 * so we can safely record this and loop back
				 * to clean up the other buffers. We don't need
				 * to remove the clustered uptodate information
				 * for this bh as it's not marked locally
				 * uptodate. */
				status = -EIO;
				put_bh(bh);
				bhs[i] = NULL;
				continue;
			}
		}

		/* Always set the buffer in the cache, even if it was
		 * a forced read, or read-ahead which hasn't yet
		 * completed. */
		ocfs2_set_buffer_uptodate(inode, bh);
	}
	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

	mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
	     (unsigned long long)block, nr,
	     ((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
	     flags);

bail:

	mlog_exit(status);
	return status;
}

/* Check whether the blkno is the super block or one of the backups. */
static void ocfs2_check_super_or_backup(struct super_block *sb,
					sector_t blkno)
{
	int i;
	u64 backup_blkno;

	if (blkno == OCFS2_SUPER_BLOCK_BLKNO)
		return;

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		backup_blkno = ocfs2_backup_super_blkno(sb, i);
		if (backup_blkno == blkno)
			return;
	}

	BUG();
}

/*
 * Writing the super block and its backups doesn't need to be coordinated
 * with the journal, so we don't need to lock ip_io_mutex and no inode
 * needs to be passed into this function.
 */
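/*
 * Illustrative call (with a hypothetical 'bh' holding the super block or
 * one of its backups; the block number is sanity-checked inside the
 * function by ocfs2_check_super_or_backup()):
 *
 *	ret = ocfs2_write_super_or_backup(osb, bh);
 *	if (ret < 0)
 *		mlog_errno(ret);
 */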
int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
				struct buffer_head *bh)
{
	int ret = 0;

	mlog_entry_void();

	BUG_ON(buffer_jbd(bh));
	ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		ret = -EIO;
		put_bh(bh);
	}

out:
	mlog_exit(ret);
	return ret;
}