xref: /linux/fs/ext4/file.c (revision 04303f8ec14269b0ea2553863553bc7eaadca1f8)
/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/aio.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
	    (atomic_read(&inode->i_writecount) == 1) &&
	    !EXT4_I(inode)->i_reserved_data_blocks) {
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If two AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
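
/*
 * For example (illustrative numbers): with a 4 KiB block size,
 * blockmask is 0xfff.  A 512-byte AIO write at offset 512 has
 * (pos | iov_iter_alignment()) & blockmask != 0 and is treated as
 * unaligned, while a 4 KiB write at offset 8192 from a block-aligned
 * user buffer is not.
 */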
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(iocb->ki_filp);
	struct mutex *aio_mutex = NULL;
	struct blk_plug plug;
	int o_direct = file->f_flags & O_DIRECT;
	int overwrite = 0;
	size_t length = iov_iter_count(from);
	ssize_t ret;
	loff_t pos = iocb->ki_pos;

	/*
	 * Unaligned direct AIO must be serialized; see the comment above.
	 * In the case of O_APPEND, assume that we must always serialize.
	 */
	if (o_direct &&
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    (file->f_flags & O_APPEND ||
	     ext4_unaligned_aio(inode, from, pos))) {
		aio_mutex = ext4_aio_mutex(inode);
		mutex_lock(aio_mutex);
		ext4_unwritten_wait(inode);
	}

	mutex_lock(&inode->i_mutex);
	if (file->f_flags & O_APPEND)
		iocb->ki_pos = pos = i_size_read(inode);

	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
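	/*
	 * Example (hypothetical numbers): if s_bitmap_maxbytes were 1000,
	 * a write starting at pos 1200 would fail with -EFBIG, while a
	 * 300-byte write starting at pos 900 would be shortened to the
	 * 100 bytes that fit below the limit.
	 */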
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if ((pos > sbi->s_bitmap_maxbytes) ||
		    (pos == sbi->s_bitmap_maxbytes && length > 0)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EFBIG;
			goto errout;
		}

		if (pos + length > sbi->s_bitmap_maxbytes)
			iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
	}

	iocb->private = &overwrite;
	if (o_direct) {
		blk_start_plug(&plug);

		/* check whether we do a DIO overwrite or not */
		if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
		    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
			struct ext4_map_blocks map;
			unsigned int blkbits = inode->i_blkbits;
			int err, len;

			map.m_lblk = pos >> blkbits;
			map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
				- map.m_lblk;
			len = map.m_len;
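
			/*
			 * Example (illustrative numbers): with 4 KiB
			 * blocks (blkbits == 12), pos == 5000 and
			 * length == 3000 give m_lblk == 1 and m_len == 1,
			 * i.e. the single block covering bytes 4096-8191.
			 */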

			err = ext4_map_blocks(NULL, inode, &map, 0);
			/*
			 * 'err == len' means that all of the blocks have
			 * been preallocated, whether or not they are
			 * initialized.  To exclude unwritten extents, we
			 * also need to check m_flags.  There are two
			 * conditions that indicate an initialized extent:
			 * 1) if we hit the extent cache, the
			 * EXT4_MAP_MAPPED flag is returned; 2) if we do a
			 * real lookup, no flags are returned for an
			 * unwritten extent.  So checking err together
			 * with EXT4_MAP_MAPPED covers both cases.
			 */
			if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
				overwrite = 1;
		}
	}

	ret = __generic_file_write_iter(iocb, from);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	if (o_direct)
		blk_finish_plug(&plug);

errout:
	if (aio_mutex)
		mutex_unlock(aio_mutex);
	return ret;
}

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite   = ext4_page_mkwrite,
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &ext4_file_vm_ops;
	return 0;
}

static int ext4_file_open(struct inode *inode, struct file *filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct path path;
	char buf[64], *cp;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		int ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an
 * extent-based file rather than ext4_ext_walk_space(), because the
 * former lets us handle SEEK_DATA/SEEK_HOLE for block-mapped and
 * extent-mapped files in the same function.  Once the extent status
 * tree is fully implemented, it will track all extent status for a
 * file and we will be able to use it directly to retrieve the offset
 * for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look
 * up the page cache to check whether there is data in the range
 * [startoff, endoff]: if this range contains an unwritten extent, we
 * treat the extent as data or as a hole depending on whether the page
 * cache holds data for it.
 */
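/*
 * For example (illustrative): if an unwritten extent covers blocks 0-3
 * and only the page backing block 1 carries not-yet-written data in
 * the page cache, SEEK_DATA at offset 0 should report the start of
 * block 1, while SEEK_HOLE at offset 0 should report offset 0 itself.
 */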
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     struct ext4_map_blocks *map,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

	index = startoff >> PAGE_CACHE_SHIFT;
	end = endoff >> PAGE_CACHE_SHIFT;
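
	/*
	 * Example (illustrative numbers): with 4 KiB pages
	 * (PAGE_CACHE_SHIFT == 12), startoff == 6000 gives index == 1,
	 * so the scan starts with the second page of the file.
	 */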

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * No pages were found: if this is the first pass
			 * through the loop, or the last offset seen is
			 * still before the end offset, there is a hole
			 * at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first pass through the loop and the
		 * offset lies before the first returned page, there is
		 * a hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If this page starts beyond the given range
			 * while the last offset seen is still inside
			 * it, the remainder of the range is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			lastoff = page_offset(page);
			bh = head = page_buffers(page);
			do {
				if (buffer_uptodate(bh) ||
				    buffer_unwritten(bh)) {
					if (whence == SEEK_DATA)
						found = 1;
				} else {
					if (whence == SEEK_HOLE)
						found = 1;
				}
				if (found) {
					*offset = max_t(loff_t,
							startoff, lastoff);
					unlock_page(page);
					goto out;
				}
				lastoff += bh->b_size;
				bh = bh->b_this_page;
			} while (bh != head);

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than requested, so the
		 * remainder of the range must be a hole.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct ext4_map_blocks map;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize) {
		mutex_unlock(&inode->i_mutex);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;
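
	/*
	 * Example (illustrative numbers): with 4 KiB blocks
	 * (blkbits == 12), offset == 5000 and isize == 20000 give
	 * start == 1 and end == 4, so logical blocks 1 through 4 are
	 * scanned.
	 */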

	do {
		map.m_lblk = last;
		map.m_len = end - last + 1;
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is a delayed extent at this offset, treat
		 * it as data.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			if (last != start)
				dataoff = (loff_t)last << blkbits;
			break;
		}

		/*
		 * If there is an unwritten extent at this offset, treat
		 * it as data or as a hole depending on whether the page
		 * cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
							      &map, &dataoff);
			if (unwritten)
				break;
		}

		last++;
		dataoff = (loff_t)last << blkbits;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

478  * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
479  */
480 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
481 {
482 	struct inode *inode = file->f_mapping->host;
483 	struct ext4_map_blocks map;
484 	struct extent_status es;
485 	ext4_lblk_t start, last, end;
486 	loff_t holeoff, isize;
487 	int blkbits;
488 	int ret = 0;
489 
490 	mutex_lock(&inode->i_mutex);
491 
492 	isize = i_size_read(inode);
493 	if (offset >= isize) {
494 		mutex_unlock(&inode->i_mutex);
495 		return -ENXIO;
496 	}
497 
498 	blkbits = inode->i_sb->s_blocksize_bits;
499 	start = offset >> blkbits;
500 	last = start;
501 	end = isize >> blkbits;
502 	holeoff = offset;
503 
504 	do {
505 		map.m_lblk = last;
506 		map.m_len = end - last + 1;
507 		ret = ext4_map_blocks(NULL, inode, &map, 0);
508 		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
509 			last += ret;
510 			holeoff = (loff_t)last << blkbits;
511 			continue;
512 		}

		/*
		 * If there is a delayed extent at this offset, skip
		 * over it; it counts as data, not a hole.
		 */
		ext4_es_find_delayed_extent_range(inode, last, last, &es);
		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
			last = es.es_lblk + es.es_len;
			holeoff = (loff_t)last << blkbits;
			continue;
		}

		/*
		 * If there is an unwritten extent at this offset, treat
		 * it as data or as a hole depending on whether the page
		 * cache has data for it.
		 */
		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
			int unwritten;
			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
							      &map, &holeoff);
			if (!unwritten) {
				last += ret;
				holeoff = (loff_t)last << blkbits;
				continue;
			}
		}

		/* found a hole */
		break;
	} while (last <= end);

	mutex_unlock(&inode->i_mutex);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}
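
/*
 * Illustrative userspace usage (a sketch, not part of this file):
 * SEEK_DATA and SEEK_HOLE let an application walk the allocated
 * regions of a sparse file, assuming a sparse file "foo" exists:
 *
 *	int fd = open("foo", O_RDONLY);
 *	off_t data = lseek(fd, 0, SEEK_DATA);     // first data offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  // end of that data run
 *
 * Both calls fail with -1/ENXIO when the offset is at or beyond the
 * end of the file.
 */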

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= ext4_listxattr,
	.removexattr	= generic_removexattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};