xref: /linux/fs/ext4/file.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
		   unsigned long nr_segs, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;
	size_t count = iov_length(iov, nr_segs);
	loff_t final_size = pos + count;

	if (pos >= inode->i_size)
		return 0;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;

	return 0;
}
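
/*
 * Worked example of the check above (illustrative only, assuming a
 * 4KiB block size, i.e. blockmask == 0x0fff):
 *
 *	pos == 4096, count == 4096  ->  final_size == 8192
 *		(4096 & 0x0fff) == 0 && (8192 & 0x0fff) == 0  ->  aligned
 *	pos == 4096, count == 100   ->  final_size == 4196
 *		(4196 & 0x0fff) != 0                          ->  unaligned
 *
 * A write entirely beyond i_size is never treated as unaligned, since
 * there is no existing block content another thread could zero.
 */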

static ssize_t
ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
		    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct blk_plug plug;
	int unaligned_aio = 0;
	ssize_t ret;
	int overwrite = 0;
	size_t length = iov_length(iov, nr_segs);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb))
		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);

	/* Unaligned direct AIO must be serialized; see comment above */
	if (unaligned_aio) {
		mutex_lock(ext4_aio_mutex(inode));
		ext4_unwritten_wait(inode);
	}

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	blk_start_plug(&plug);

	iocb->private = &overwrite;

	/* check whether this write is a DIO overwrite */
	if (ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
		struct ext4_map_blocks map;
		unsigned int blkbits = inode->i_blkbits;
		int err, len;

		map.m_lblk = pos >> blkbits;
		map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
			- map.m_lblk;
		len = map.m_len;

		err = ext4_map_blocks(NULL, inode, &map, 0);
		/*
		 * 'err == len' means that all of the blocks have been
		 * preallocated, whether or not they are initialized.  To
		 * exclude uninitialized extents we also need to check
		 * m_flags.  Two outcomes indicate an initialized extent:
		 * 1) on an extent cache hit, EXT4_MAP_MAPPED is returned;
		 * 2) on a real lookup, no unwritten flag is returned.
		 * So check for EXT4_MAP_MAPPED here.
		 */
		if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
			overwrite = 1;
	}

	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	blk_finish_plug(&plug);

	if (unaligned_aio)
		mutex_unlock(ext4_aio_mutex(inode));

	return ret;
}
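
/*
 * Example of the overwrite-detection mapping above (illustrative,
 * assuming 4KiB blocks, i.e. blkbits == 12): for pos == 5000 and
 * length == 10000, m_lblk == 5000 >> 12 == 1 and
 * EXT4_BLOCK_ALIGN(15000, 12) >> 12 == 4, so m_len == 3 (logical
 * blocks 1..3).  ext4_map_blocks() returning 3 with EXT4_MAP_MAPPED
 * set means the whole range is already allocated and initialized, so
 * the write is treated as a pure overwrite.
 */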

static ssize_t
ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		size_t length = iov_length(iov, nr_segs);

		if ((pos > sbi->s_bitmap_maxbytes ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)))
			return -EFBIG;

		if (pos + length > sbi->s_bitmap_maxbytes) {
			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
					      sbi->s_bitmap_maxbytes - pos);
		}
	}

	if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
		ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
	else
		ret = generic_file_aio_write(iocb, iov, nr_segs, pos);

	return ret;
}
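
/*
 * Example of the clamping above (illustrative numbers only): if
 * s_bitmap_maxbytes were 2048, a 1024-byte write at pos == 1536 would
 * be shortened by iov_shorten() to the 512 bytes that still fit below
 * the limit, while a write starting at or beyond 2048 fails outright
 * with -EFBIG.
 */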

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite   = ext4_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because this
 * lets us implement SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree is fully implemented, it will track all extent status for a
 * file and we will be able to use it directly to retrieve the offset
 * for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is any data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we
 * treat the extent as data or as a hole depending on whether the page
 * cache holds data for it.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first iteration of the loop, or
			 * if the offset is still before the end offset,
			 * there is a hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first iteration of the loop and the
		 * offset is smaller than the first page offset, there is
		 * a hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is not beyond the end of
			 * the given range and this page already lies past
			 * it, the remainder of the range is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			/*
			 * Walk the buffers in the page: an uptodate or
			 * unwritten buffer counts as data, anything else
			 * counts as a hole.
			 */
			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we asked for, so there
		 * must be a hole after the last page we saw.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
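
/*
 * Example (illustrative): consider a file with a 1MiB fallocate()d,
 * still unwritten extent into which 4KiB was buffered-written at
 * offset 64KiB.  The extent tree alone cannot distinguish data from
 * hole inside that extent, but the cached page at 64KiB makes this
 * function report data there for SEEK_DATA, and report the untouched
 * parts of the extent as a hole for SEEK_HOLE.
 */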

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat it
		 * as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, treat
		 * it as data or as a hole depending on whether the page
		 * cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}
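
/*
 * Summary of the per-block decision in the scan above:
 *   1. written extent    -> data
 *   2. delayed extent    -> data (dirty pages not yet allocated on disk)
 *   3. unwritten extent  -> data iff the page cache has data for it
 *   4. otherwise         -> advance one block and keep scanning
 * ext4_seek_hole() below applies the inverse of the same ordering.
 */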

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			last += ret;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is a delayed extent at this offset, skip over
		 * it; delayed blocks count as data, not as a hole.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, treat
		 * it as data or as a hole depending on whether the page
		 * cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
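
/*
 * Userspace usage sketch (hypothetical example, not part of this file):
 * walking the data regions of an ext4 file with lseek(2):
 *
 *	off_t data = 0, hole;
 *	int fd = open("somefile", O_RDONLY);
 *
 *	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
 *		hole = lseek(fd, data, SEEK_HOLE);
 *		printf("data: %lld..%lld\n",
 *		       (long long)data, (long long)hole);
 *		data = hole;
 *	}
 *
 * lseek() fails with ENXIO once the offset reaches i_size, which
 * terminates the loop; both calls are bounded by the maxbytes value
 * chosen in ext4_llseek() above.
 */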

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= ext4_file_write,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.fiemap		= ext4_fiemap,
};