xref: /linux/fs/gfs2/file.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9 
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/ext2_fs.h>
22 #include <linux/falloc.h>
23 #include <linux/swap.h>
24 #include <linux/crc32.h>
25 #include <linux/writeback.h>
26 #include <asm/uaccess.h>
27 #include <linux/dlm.h>
28 #include <linux/dlm_plock.h>
29 
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43 
44 /**
45  * gfs2_llseek - seek to a location in a file
46  * @file: the file
47  * @offset: the offset
48  * @origin: Where to seek from (SEEK_SET, SEEK_CUR, SEEK_END, SEEK_DATA or SEEK_HOLE)
49  *
50  * SEEK_END, SEEK_DATA and SEEK_HOLE require the glock for the file
51  * because they reference the file's size.
52  *
53  * Returns: The new offset, or errno
54  */
55 
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
57 {
58 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 	struct gfs2_holder i_gh;
60 	loff_t error;
61 
62 	switch (origin) {
63 	case SEEK_END: /* These reference inode->i_size */
64 	case SEEK_DATA:
65 	case SEEK_HOLE:
66 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
67 					   &i_gh);
68 		if (!error) {
69 			error = generic_file_llseek(file, offset, origin);
70 			gfs2_glock_dq_uninit(&i_gh);
71 		}
72 		break;
73 	case SEEK_CUR:
74 	case SEEK_SET:
75 		error = generic_file_llseek(file, offset, origin);
76 		break;
77 	default:
78 		error = -EINVAL;
79 	}
80 
81 	return error;
82 }
83 
84 /**
85  * gfs2_readdir - Read directory entries from a directory
86  * @file: The directory to read from
87  * @dirent: Buffer for dirents
88  * @filldir: Function used to do the copying
89  *
90  * Returns: errno
91  */
92 
93 static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
94 {
95 	struct inode *dir = file->f_mapping->host;
96 	struct gfs2_inode *dip = GFS2_I(dir);
97 	struct gfs2_holder d_gh;
98 	u64 offset = file->f_pos;
99 	int error;
100 
101 	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
102 	error = gfs2_glock_nq(&d_gh);
103 	if (error) {
104 		gfs2_holder_uninit(&d_gh);
105 		return error;
106 	}
107 
108 	error = gfs2_dir_read(dir, &offset, dirent, filldir);
109 
110 	gfs2_glock_dq_uninit(&d_gh);
111 
112 	file->f_pos = offset;
113 
114 	return error;
115 }
116 
117 /**
118  * fsflags_cvt - convert flag bits using a translation table
119  * @table: A table of 32 u32 flags
120  * @val: a 32 bit value to convert
121  *
122  * This function can be used to convert between fsflags values and
123  * GFS2's own flags values.
124  *
125  * Returns: the converted flags
126  */
127 static u32 fsflags_cvt(const u32 *table, u32 val)
128 {
129 	u32 res = 0;
130 	while (val) {
131 		if (val & 1)
132 			res |= *table;
133 		table++;
134 		val >>= 1;
135 	}
136 	return res;
137 }
138 
139 static const u32 fsflags_to_gfs2[32] = {
140 	[3] = GFS2_DIF_SYNC,
141 	[4] = GFS2_DIF_IMMUTABLE,
142 	[5] = GFS2_DIF_APPENDONLY,
143 	[7] = GFS2_DIF_NOATIME,
144 	[12] = GFS2_DIF_EXHASH,
145 	[14] = GFS2_DIF_INHERIT_JDATA,
146 };
147 
148 static const u32 gfs2_to_fsflags[32] = {
149 	[gfs2fl_Sync] = FS_SYNC_FL,
150 	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
151 	[gfs2fl_AppendOnly] = FS_APPEND_FL,
152 	[gfs2fl_NoAtime] = FS_NOATIME_FL,
153 	[gfs2fl_ExHash] = FS_INDEX_FL,
154 	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
155 };
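
/*
 * Worked example (illustrative): FS_SYNC_FL is bit 3 of the fsflags word,
 * so fsflags_cvt(fsflags_to_gfs2, FS_SYNC_FL) shifts through the value one
 * bit at a time and returns fsflags_to_gfs2[3] == GFS2_DIF_SYNC. The
 * reverse direction uses gfs2_to_fsflags, indexed by the gfs2fl_* bit
 * numbers of the on-disk flags.
 */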
156 
157 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
158 {
159 	struct inode *inode = filp->f_path.dentry->d_inode;
160 	struct gfs2_inode *ip = GFS2_I(inode);
161 	struct gfs2_holder gh;
162 	int error;
163 	u32 fsflags;
164 
165 	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
166 	error = gfs2_glock_nq(&gh);
167 	if (error)
168 		goto out_uninit;
169 
170 	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
171 	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
172 		fsflags |= FS_JOURNAL_DATA_FL;
173 	if (put_user(fsflags, ptr))
174 		error = -EFAULT;
175 
176 	gfs2_glock_dq(&gh);
out_uninit:
177 	gfs2_holder_uninit(&gh);
178 	return error;
179 }
180 
181 void gfs2_set_inode_flags(struct inode *inode)
182 {
183 	struct gfs2_inode *ip = GFS2_I(inode);
184 	unsigned int flags = inode->i_flags;
185 
186 	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
187 	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
188 		inode->i_flags |= S_NOSEC;
189 	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
190 		flags |= S_IMMUTABLE;
191 	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
192 		flags |= S_APPEND;
193 	if (ip->i_diskflags & GFS2_DIF_NOATIME)
194 		flags |= S_NOATIME;
195 	if (ip->i_diskflags & GFS2_DIF_SYNC)
196 		flags |= S_SYNC;
197 	inode->i_flags = flags;
198 }
199 
200 /* Flags that can be set by user space */
201 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
202 			     GFS2_DIF_IMMUTABLE|		\
203 			     GFS2_DIF_APPENDONLY|		\
204 			     GFS2_DIF_NOATIME|			\
205 			     GFS2_DIF_SYNC|			\
206 			     GFS2_DIF_SYSTEM|			\
207 			     GFS2_DIF_INHERIT_JDATA)
208 
209 /**
210  * do_gfs2_set_flags - set flags on an inode
211  * @filp: file pointer
212  * @reqflags: The flags to set
213  * @mask: Indicates which flags are valid
214  *
215  */
216 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
217 {
218 	struct inode *inode = filp->f_path.dentry->d_inode;
219 	struct gfs2_inode *ip = GFS2_I(inode);
220 	struct gfs2_sbd *sdp = GFS2_SB(inode);
221 	struct buffer_head *bh;
222 	struct gfs2_holder gh;
223 	int error;
224 	u32 new_flags, flags;
225 
226 	error = mnt_want_write(filp->f_path.mnt);
227 	if (error)
228 		return error;
229 
230 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
231 	if (error)
232 		goto out_drop_write;
233 
234 	error = -EACCES;
235 	if (!inode_owner_or_capable(inode))
236 		goto out;
237 
238 	error = 0;
239 	flags = ip->i_diskflags;
240 	new_flags = (flags & ~mask) | (reqflags & mask);
241 	if ((new_flags ^ flags) == 0)
242 		goto out;
243 
244 	error = -EINVAL;
245 	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
246 		goto out;
247 
248 	error = -EPERM;
249 	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
250 		goto out;
251 	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
252 		goto out;
253 	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
254 	    !capable(CAP_LINUX_IMMUTABLE))
255 		goto out;
256 	if (!IS_IMMUTABLE(inode)) {
257 		error = gfs2_permission(inode, MAY_WRITE);
258 		if (error)
259 			goto out;
260 	}
261 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
262 		if (flags & GFS2_DIF_JDATA)
263 			gfs2_log_flush(sdp, ip->i_gl);
264 		error = filemap_fdatawrite(inode->i_mapping);
265 		if (error)
266 			goto out;
267 		error = filemap_fdatawait(inode->i_mapping);
268 		if (error)
269 			goto out;
270 	}
271 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
272 	if (error)
273 		goto out;
274 	error = gfs2_meta_inode_buffer(ip, &bh);
275 	if (error)
276 		goto out_trans_end;
277 	gfs2_trans_add_bh(ip->i_gl, bh, 1);
278 	ip->i_diskflags = new_flags;
279 	gfs2_dinode_out(ip, bh->b_data);
280 	brelse(bh);
281 	gfs2_set_inode_flags(inode);
282 	gfs2_set_aops(inode);
283 out_trans_end:
284 	gfs2_trans_end(sdp);
285 out:
286 	gfs2_glock_dq_uninit(&gh);
287 out_drop_write:
288 	mnt_drop_write(filp->f_path.mnt);
289 	return error;
290 }
291 
292 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
293 {
294 	struct inode *inode = filp->f_path.dentry->d_inode;
295 	u32 fsflags, gfsflags;
296 
297 	if (get_user(fsflags, ptr))
298 		return -EFAULT;
299 
300 	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
301 	if (!S_ISDIR(inode->i_mode)) {
302 		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
303 			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
304 		return do_gfs2_set_flags(filp, gfsflags, ~0);
305 	}
306 	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
307 }
308 
309 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
310 {
311 	switch (cmd) {
312 	case FS_IOC_GETFLAGS:
313 		return gfs2_get_flags(filp, (u32 __user *)arg);
314 	case FS_IOC_SETFLAGS:
315 		return gfs2_set_flags(filp, (u32 __user *)arg);
316 	}
317 	return -ENOTTY;
318 }
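
/*
 * These handlers are reached through the generic FS_IOC_GETFLAGS and
 * FS_IOC_SETFLAGS ioctls, i.e. the interface used by chattr(1) and
 * lsattr(1). Roughly, from userspace (illustrative sketch only, not part
 * of GFS2):
 *
 *	unsigned int flags;
 *	int fd = open("/mnt/gfs2/file", O_RDONLY);
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */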
319 
320 /**
321  * gfs2_allocate_page_backing - Use bmap to allocate blocks
322  * @page: The (locked) page to allocate backing for
323  *
324  * We try to allocate all the blocks required for the page in
325  * one go. This might fail for various reasons, so we keep
326  * trying until all the blocks to back this page are allocated.
327  * If some of the blocks are already allocated, that's ok too.
328  */
329 
330 static int gfs2_allocate_page_backing(struct page *page)
331 {
332 	struct inode *inode = page->mapping->host;
333 	struct buffer_head bh;
334 	unsigned long size = PAGE_CACHE_SIZE;
335 	u64 lblock = (u64)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
336 
337 	do {
338 		bh.b_state = 0;
339 		bh.b_size = size;
340 		gfs2_block_map(inode, lblock, &bh, 1);
341 		if (!buffer_mapped(&bh))
342 			return -EIO;
343 		size -= bh.b_size;
344 		lblock += (bh.b_size >> inode->i_blkbits);
345 	} while (size > 0);
346 	return 0;
347 }
348 
349 /**
350  * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
351  * @vma: The virtual memory area
352  * @page: The page which is about to become writable
353  *
354  * When the page becomes writable, we need to ensure that we have
355  * blocks allocated on disk to back that page.
356  */
357 
358 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
359 {
360 	struct page *page = vmf->page;
361 	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
362 	struct gfs2_inode *ip = GFS2_I(inode);
363 	struct gfs2_sbd *sdp = GFS2_SB(inode);
364 	unsigned long last_index;
365 	u64 pos = ((u64)page->index) << PAGE_CACHE_SHIFT;
366 	unsigned int data_blocks, ind_blocks, rblocks;
367 	struct gfs2_holder gh;
368 	struct gfs2_alloc *al;
369 	loff_t size;
370 	int ret;
371 
372 	/* Wait if fs is frozen. This is racy so we check again later on
373 	 * and retry if the fs has been frozen after the page lock has
374 	 * been acquired
375 	 */
376 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
377 
378 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
379 	ret = gfs2_glock_nq(&gh);
380 	if (ret)
381 		goto out;
382 
383 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
384 	set_bit(GIF_SW_PAGED, &ip->i_flags);
385 
386 	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
387 		lock_page(page);
388 		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
389 			ret = -EAGAIN;
390 			unlock_page(page);
391 		}
392 		goto out_unlock;
393 	}
394 
395 	ret = -ENOMEM;
396 	al = gfs2_alloc_get(ip);
397 	if (al == NULL)
398 		goto out_unlock;
399 
400 	ret = gfs2_quota_lock_check(ip);
401 	if (ret)
402 		goto out_alloc_put;
403 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
404 	al->al_requested = data_blocks + ind_blocks;
405 	ret = gfs2_inplace_reserve(ip);
406 	if (ret)
407 		goto out_quota_unlock;
408 
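	/*
	 * Journal reservation for the transaction below: the dinode block,
	 * any indirect (metadata) blocks, the data blocks themselves when
	 * journaled data is in use, plus statfs/quota changes and resource
	 * group bitmap blocks whenever an allocation is actually required.
	 */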
409 	rblocks = RES_DINODE + ind_blocks;
410 	if (gfs2_is_jdata(ip))
411 		rblocks += data_blocks ? data_blocks : 1;
412 	if (ind_blocks || data_blocks) {
413 		rblocks += RES_STATFS + RES_QUOTA;
414 		rblocks += gfs2_rg_blocks(ip);
415 	}
416 	ret = gfs2_trans_begin(sdp, rblocks, 0);
417 	if (ret)
418 		goto out_trans_fail;
419 
420 	lock_page(page);
421 	ret = -EINVAL;
422 	size = i_size_read(inode);
423 	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
424 	/* Check page index against inode size */
425 	if (size == 0 || (page->index > last_index))
426 		goto out_trans_end;
427 
428 	ret = -EAGAIN;
429 	/* If truncated, we must retry the operation, as we may have raced
430 	 * with the glock demotion code.
431 	 */
432 	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
433 		goto out_trans_end;
434 
435 	/* Unstuff, if required, and allocate backing blocks for page */
436 	ret = 0;
437 	if (gfs2_is_stuffed(ip))
438 		ret = gfs2_unstuff_dinode(ip, page);
439 	if (ret == 0)
440 		ret = gfs2_allocate_page_backing(page);
441 
442 out_trans_end:
443 	if (ret)
444 		unlock_page(page);
445 	gfs2_trans_end(sdp);
446 out_trans_fail:
447 	gfs2_inplace_release(ip);
448 out_quota_unlock:
449 	gfs2_quota_unlock(ip);
450 out_alloc_put:
451 	gfs2_alloc_put(ip);
452 out_unlock:
453 	gfs2_glock_dq(&gh);
454 out:
455 	gfs2_holder_uninit(&gh);
456 	if (ret == 0) {
457 		set_page_dirty(page);
458 		/* This check must be made after the transaction lock is dropped */
459 		if (inode->i_sb->s_frozen == SB_UNFROZEN) {
460 			wait_on_page_writeback(page);
461 		} else {
462 			ret = -EAGAIN;
463 			unlock_page(page);
464 		}
465 	}
466 	return block_page_mkwrite_return(ret);
467 }
468 
469 static const struct vm_operations_struct gfs2_vm_ops = {
470 	.fault = filemap_fault,
471 	.page_mkwrite = gfs2_page_mkwrite,
472 };
473 
474 /**
475  * gfs2_mmap - set up a memory mapping of a file
476  * @file: The file to map
477  * @vma: The VMA which describes the mapping
478  *
479  * There is no need to get a lock here unless we should be updating
480  * atime. We ignore any locking errors since the only consequence is
481  * a missed atime update (which will just be deferred until later).
482  *
483  * Returns: 0
484  */
485 
486 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
487 {
488 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
489 
490 	if (!(file->f_flags & O_NOATIME) &&
491 	    !IS_NOATIME(&ip->i_inode)) {
492 		struct gfs2_holder i_gh;
493 		int error;
494 
495 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
496 		error = gfs2_glock_nq(&i_gh);
497 		if (error == 0) {
498 			file_accessed(file);
499 			gfs2_glock_dq(&i_gh);
500 		}
501 		gfs2_holder_uninit(&i_gh);
502 		if (error)
503 			return error;
504 	}
505 	vma->vm_ops = &gfs2_vm_ops;
506 	vma->vm_flags |= VM_CAN_NONLINEAR;
507 
508 	return 0;
509 }
510 
511 /**
512  * gfs2_open - open a file
513  * @inode: the inode to open
514  * @file: the struct file for this opening
515  *
516  * Returns: errno
517  */
518 
519 static int gfs2_open(struct inode *inode, struct file *file)
520 {
521 	struct gfs2_inode *ip = GFS2_I(inode);
522 	struct gfs2_holder i_gh;
523 	struct gfs2_file *fp;
524 	int error;
525 
526 	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
527 	if (!fp)
528 		return -ENOMEM;
529 
530 	mutex_init(&fp->f_fl_mutex);
531 
532 	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
533 	file->private_data = fp;
534 
535 	if (S_ISREG(ip->i_inode.i_mode)) {
536 		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
537 					   &i_gh);
538 		if (error)
539 			goto fail;
540 
541 		if (!(file->f_flags & O_LARGEFILE) &&
542 		    i_size_read(inode) > MAX_NON_LFS) {
543 			error = -EOVERFLOW;
544 			goto fail_gunlock;
545 		}
546 
547 		gfs2_glock_dq_uninit(&i_gh);
548 	}
549 
550 	return 0;
551 
552 fail_gunlock:
553 	gfs2_glock_dq_uninit(&i_gh);
554 fail:
555 	file->private_data = NULL;
556 	kfree(fp);
557 	return error;
558 }
559 
560 /**
561  * gfs2_close - called to close a struct file
562  * @inode: the inode the struct file belongs to
563  * @file: the struct file being closed
564  *
565  * Returns: errno
566  */
567 
568 static int gfs2_close(struct inode *inode, struct file *file)
569 {
570 	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
571 	struct gfs2_file *fp;
572 
573 	fp = file->private_data;
574 	file->private_data = NULL;
575 
576 	if (gfs2_assert_warn(sdp, fp))
577 		return -EIO;
578 
579 	kfree(fp);
580 
581 	return 0;
582 }
583 
584 /**
585  * gfs2_fsync - sync the dirty data for a file (across the cluster)
586  * @file: the file that points to the dentry
587  * @start: the start position in the file to sync
588  * @end: the end position in the file to sync
589  * @datasync: set if we can ignore timestamp changes
590  *
591  * We split the data flushing here so that we don't wait for the data
592  * until after we've also sent the metadata to disk. Note that for
593  * data=ordered, we will write & wait for the data at the log flush
594  * stage anyway, so this is unlikely to make much of a difference
595  * except in the data=writeback case.
596  *
597  * If the fdatawrite fails due to any reason except -EIO, we will
598  * continue the remainder of the fsync, although we'll still report
599  * the error at the end. This is to match filemap_write_and_wait_range()
600  * behaviour.
601  *
602  * Returns: errno
603  */
604 
605 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
606 		      int datasync)
607 {
608 	struct address_space *mapping = file->f_mapping;
609 	struct inode *inode = mapping->host;
610 	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
611 	struct gfs2_inode *ip = GFS2_I(inode);
612 	int ret = 0, ret1 = 0;
613 
614 	if (mapping->nrpages) {
615 		ret1 = filemap_fdatawrite_range(mapping, start, end);
616 		if (ret1 == -EIO)
617 			return ret1;
618 	}
619 
620 	if (datasync)
621 		sync_state &= ~I_DIRTY_SYNC;
622 
623 	if (sync_state) {
624 		ret = sync_inode_metadata(inode, 1);
625 		if (ret)
626 			return ret;
627 		if (gfs2_is_jdata(ip))
628 			filemap_write_and_wait(mapping);
629 		gfs2_ail_flush(ip->i_gl, 1);
630 	}
631 
632 	if (mapping->nrpages)
633 		ret = filemap_fdatawait_range(mapping, start, end);
634 
635 	return ret ? ret : ret1;
636 }
637 
638 /**
639  * gfs2_file_aio_write - Perform a write to a file
640  * @iocb: The io context
641  * @iov: The data to write
642  * @nr_segs: Number of @iov segments
643  * @pos: The file position
644  *
645  * We have to do a lock/unlock here to refresh the inode size for
646  * O_APPEND writes, otherwise we can land up writing at the wrong
647  * offset. There is still a race, but provided the app is using its
648  * own file locking, this will make O_APPEND work as expected.
649  *
650  */
651 
652 static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
653 				   unsigned long nr_segs, loff_t pos)
654 {
655 	struct file *file = iocb->ki_filp;
656 
657 	if (file->f_flags & O_APPEND) {
658 		struct dentry *dentry = file->f_dentry;
659 		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
660 		struct gfs2_holder gh;
661 		int ret;
662 
663 		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
664 		if (ret)
665 			return ret;
666 		gfs2_glock_dq_uninit(&gh);
667 	}
668 
669 	return generic_file_aio_write(iocb, iov, nr_segs, pos);
670 }
671 
672 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
673 			   int mode)
674 {
675 	struct gfs2_inode *ip = GFS2_I(inode);
676 	struct buffer_head *dibh;
677 	int error;
678 	unsigned int nr_blks;
679 	sector_t lblock = offset >> inode->i_blkbits;
680 
681 	error = gfs2_meta_inode_buffer(ip, &dibh);
682 	if (unlikely(error))
683 		return error;
684 
685 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
686 
687 	if (gfs2_is_stuffed(ip)) {
688 		error = gfs2_unstuff_dinode(ip, NULL);
689 		if (unlikely(error))
690 			goto out;
691 	}
692 
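	/*
	 * Map (and allocate) the range one extent at a time. gfs2_block_map
	 * returns the size it actually mapped in bh_map.b_size, which may be
	 * smaller than the size requested, so loop until the whole range is
	 * covered.
	 */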
693 	while (len) {
694 		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
695 		bh_map.b_size = len;
696 		set_buffer_zeronew(&bh_map);
697 
698 		error = gfs2_block_map(inode, lblock, &bh_map, 1);
699 		if (unlikely(error))
700 			goto out;
701 		len -= bh_map.b_size;
702 		nr_blks = bh_map.b_size >> inode->i_blkbits;
703 		lblock += nr_blks;
704 		if (!buffer_new(&bh_map))
705 			continue;
706 		if (unlikely(!buffer_zeronew(&bh_map))) {
707 			error = -EIO;
708 			goto out;
709 		}
710 	}
711 	if (offset + len > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
712 		i_size_write(inode, offset + len);
713 
714 	mark_inode_dirty(inode);
715 
716 out:
717 	brelse(dibh);
718 	return error;
719 }
720 
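/*
 * calc_max_reserv - trim a reservation to fit the current resource group
 *
 * Scales back the requested length, data block count and indirect block
 * count so that the whole allocation fits within the free space of the
 * resource group already reserved for this inode.
 */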
721 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
722 			    unsigned int *data_blocks, unsigned int *ind_blocks)
723 {
724 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
725 	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
726 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
727 
728 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
729 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
730 		max_data -= tmp;
731 	}
732 	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv(),
733 	   so it might end up with fewer data blocks */
734 	if (max_data <= *data_blocks)
735 		return;
736 	*data_blocks = max_data;
737 	*ind_blocks = max_blocks - max_data;
738 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
739 	if (*len > max) {
740 		*len = max;
741 		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
742 	}
743 }
744 
745 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
746 			   loff_t len)
747 {
748 	struct inode *inode = file->f_path.dentry->d_inode;
749 	struct gfs2_sbd *sdp = GFS2_SB(inode);
750 	struct gfs2_inode *ip = GFS2_I(inode);
751 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
752 	loff_t bytes, max_bytes;
753 	struct gfs2_alloc *al;
754 	int error;
755 	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
756 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
757 	loff_t max_chunk_size = UINT_MAX & bsize_mask;
758 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
759 
760 	/* We only support the FALLOC_FL_KEEP_SIZE mode */
761 	if (mode & ~FALLOC_FL_KEEP_SIZE)
762 		return -EOPNOTSUPP;
763 
764 	offset &= bsize_mask;
765 
766 	len = next - offset;
767 	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
768 	if (!bytes)
769 		bytes = UINT_MAX;
770 	bytes &= bsize_mask;
771 	if (bytes == 0)
772 		bytes = sdp->sd_sb.sb_bsize;
773 
774 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
775 	error = gfs2_glock_nq(&ip->i_gh);
776 	if (unlikely(error))
777 		goto out_uninit;
778 
779 	if (!gfs2_write_alloc_required(ip, offset, len))
780 		goto out_unlock;
781 
782 	while (len > 0) {
783 		if (len < bytes)
784 			bytes = len;
785 		al = gfs2_alloc_get(ip);
786 		if (!al) {
787 			error = -ENOMEM;
788 			goto out_unlock;
789 		}
790 
791 		error = gfs2_quota_lock_check(ip);
792 		if (error)
793 			goto out_alloc_put;
794 
795 retry:
796 		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
797 
798 		al->al_requested = data_blocks + ind_blocks;
799 		error = gfs2_inplace_reserve(ip);
800 		if (error) {
801 			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
802 				bytes >>= 1;
803 				bytes &= bsize_mask;
804 				if (bytes == 0)
805 					bytes = sdp->sd_sb.sb_bsize;
806 				goto retry;
807 			}
808 			goto out_qunlock;
809 		}
810 		max_bytes = bytes;
811 		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
812 				&max_bytes, &data_blocks, &ind_blocks);
813 		al->al_requested = data_blocks + ind_blocks;
814 
815 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
816 			  RES_RG_HDR + gfs2_rg_blocks(ip);
817 		if (gfs2_is_jdata(ip))
818 			rblocks += data_blocks ? data_blocks : 1;
819 
820 		error = gfs2_trans_begin(sdp, rblocks,
821 					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
822 		if (error)
823 			goto out_trans_fail;
824 
825 		error = fallocate_chunk(inode, offset, max_bytes, mode);
826 		gfs2_trans_end(sdp);
827 
828 		if (error)
829 			goto out_trans_fail;
830 
831 		len -= max_bytes;
832 		offset += max_bytes;
833 		gfs2_inplace_release(ip);
834 		gfs2_quota_unlock(ip);
835 		gfs2_alloc_put(ip);
836 	}
837 	goto out_unlock;
838 
839 out_trans_fail:
840 	gfs2_inplace_release(ip);
841 out_qunlock:
842 	gfs2_quota_unlock(ip);
843 out_alloc_put:
844 	gfs2_alloc_put(ip);
845 out_unlock:
846 	gfs2_glock_dq(&ip->i_gh);
847 out_uninit:
848 	gfs2_holder_uninit(&ip->i_gh);
849 	return error;
850 }
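
/*
 * The only fallocate flag accepted above is FALLOC_FL_KEEP_SIZE; any other
 * flag returns -EOPNOTSUPP. An illustrative userspace call (sketch only,
 * not part of GFS2):
 *
 *	int fd = open("/mnt/gfs2/file", O_WRONLY);
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 * 1024 * 1024);
 *
 * which preallocates 16MB of blocks without changing the file size.
 */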
851 
852 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
853 
854 /**
855  * gfs2_setlease - acquire/release a file lease
856  * @file: the file pointer
857  * @arg: lease type
858  * @fl: file lock
859  *
860  * We don't currently have a way to enforce a lease across the whole
861  * cluster; until we do, disable leases (by just returning -EINVAL),
862  * unless the administrator has requested purely local locking.
863  *
864  * Locking: called under lock_flocks
865  *
866  * Returns: errno
867  */
868 
869 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
870 {
871 	return -EINVAL;
872 }
873 
874 /**
875  * gfs2_lock - acquire/release a posix lock on a file
876  * @file: the file pointer
877  * @cmd: either modify or retrieve lock state, possibly wait
878  * @fl: type and range of lock
879  *
880  * Returns: errno
881  */
882 
883 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
884 {
885 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
886 	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
887 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
888 
889 	if (!(fl->fl_flags & FL_POSIX))
890 		return -ENOLCK;
891 	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
892 		return -ENOLCK;
893 
894 	if (cmd == F_CANCELLK) {
895 		/* Hack: */
896 		cmd = F_SETLK;
897 		fl->fl_type = F_UNLCK;
898 	}
899 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
900 		return -EIO;
901 	if (IS_GETLK(cmd))
902 		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
903 	else if (fl->fl_type == F_UNLCK)
904 		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
905 	else
906 		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
907 }
908 
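/*
 * flock locks are backed by a dedicated per-inode flock glock: LOCK_EX
 * (seen here as F_WRLCK) takes the glock in the exclusive state and
 * LOCK_SH (F_RDLCK) takes it shared, so flock semantics are enforced
 * across the whole cluster rather than only on the local node.
 */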
909 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
910 {
911 	struct gfs2_file *fp = file->private_data;
912 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
913 	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
914 	struct gfs2_glock *gl;
915 	unsigned int state;
916 	int flags;
917 	int error = 0;
918 
919 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
920 	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
921 
922 	mutex_lock(&fp->f_fl_mutex);
923 
924 	gl = fl_gh->gh_gl;
925 	if (gl) {
926 		if (fl_gh->gh_state == state)
927 			goto out;
928 		flock_lock_file_wait(file,
929 				     &(struct file_lock){.fl_type = F_UNLCK});
930 		gfs2_glock_dq_wait(fl_gh);
931 		gfs2_holder_reinit(state, flags, fl_gh);
932 	} else {
933 		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
934 				       &gfs2_flock_glops, CREATE, &gl);
935 		if (error)
936 			goto out;
937 		gfs2_holder_init(gl, state, flags, fl_gh);
938 		gfs2_glock_put(gl);
939 	}
940 	error = gfs2_glock_nq(fl_gh);
941 	if (error) {
942 		gfs2_holder_uninit(fl_gh);
943 		if (error == GLR_TRYFAILED)
944 			error = -EAGAIN;
945 	} else {
946 		error = flock_lock_file_wait(file, fl);
947 		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
948 	}
949 
950 out:
951 	mutex_unlock(&fp->f_fl_mutex);
952 	return error;
953 }
954 
955 static void do_unflock(struct file *file, struct file_lock *fl)
956 {
957 	struct gfs2_file *fp = file->private_data;
958 	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
959 
960 	mutex_lock(&fp->f_fl_mutex);
961 	flock_lock_file_wait(file, fl);
962 	if (fl_gh->gh_gl) {
963 		gfs2_glock_dq_wait(fl_gh);
964 		gfs2_holder_uninit(fl_gh);
965 	}
966 	mutex_unlock(&fp->f_fl_mutex);
967 }
968 
969 /**
970  * gfs2_flock - acquire/release a flock lock on a file
971  * @file: the file pointer
972  * @cmd: either modify or retrieve lock state, possibly wait
973  * @fl: type and range of lock
974  *
975  * Returns: errno
976  */
977 
978 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
979 {
980 	if (!(fl->fl_flags & FL_FLOCK))
981 		return -ENOLCK;
982 	if (fl->fl_type & LOCK_MAND)
983 		return -EOPNOTSUPP;
984 
985 	if (fl->fl_type == F_UNLCK) {
986 		do_unflock(file, fl);
987 		return 0;
988 	} else {
989 		return do_flock(file, cmd, fl);
990 	}
991 }
992 
993 const struct file_operations gfs2_file_fops = {
994 	.llseek		= gfs2_llseek,
995 	.read		= do_sync_read,
996 	.aio_read	= generic_file_aio_read,
997 	.write		= do_sync_write,
998 	.aio_write	= gfs2_file_aio_write,
999 	.unlocked_ioctl	= gfs2_ioctl,
1000 	.mmap		= gfs2_mmap,
1001 	.open		= gfs2_open,
1002 	.release	= gfs2_close,
1003 	.fsync		= gfs2_fsync,
1004 	.lock		= gfs2_lock,
1005 	.flock		= gfs2_flock,
1006 	.splice_read	= generic_file_splice_read,
1007 	.splice_write	= generic_file_splice_write,
1008 	.setlease	= gfs2_setlease,
1009 	.fallocate	= gfs2_fallocate,
1010 };
1011 
1012 const struct file_operations gfs2_dir_fops = {
1013 	.readdir	= gfs2_readdir,
1014 	.unlocked_ioctl	= gfs2_ioctl,
1015 	.open		= gfs2_open,
1016 	.release	= gfs2_close,
1017 	.fsync		= gfs2_fsync,
1018 	.lock		= gfs2_lock,
1019 	.flock		= gfs2_flock,
1020 	.llseek		= default_llseek,
1021 };
1022 
1023 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1024 
1025 const struct file_operations gfs2_file_fops_nolock = {
1026 	.llseek		= gfs2_llseek,
1027 	.read		= do_sync_read,
1028 	.aio_read	= generic_file_aio_read,
1029 	.write		= do_sync_write,
1030 	.aio_write	= gfs2_file_aio_write,
1031 	.unlocked_ioctl	= gfs2_ioctl,
1032 	.mmap		= gfs2_mmap,
1033 	.open		= gfs2_open,
1034 	.release	= gfs2_close,
1035 	.fsync		= gfs2_fsync,
1036 	.splice_read	= generic_file_splice_read,
1037 	.splice_write	= generic_file_splice_write,
1038 	.setlease	= generic_setlease,
1039 	.fallocate	= gfs2_fallocate,
1040 };
1041 
1042 const struct file_operations gfs2_dir_fops_nolock = {
1043 	.readdir	= gfs2_readdir,
1044 	.unlocked_ioctl	= gfs2_ioctl,
1045 	.open		= gfs2_open,
1046 	.release	= gfs2_close,
1047 	.fsync		= gfs2_fsync,
1048 	.llseek		= default_llseek,
1049 };
1050 
1051