/*
 *  linux/fs/read_write.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/smp_lock.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include "read_write.h"

#include <asm/uaccess.h>
#include <asm/unistd.h>

const struct file_operations generic_ro_fops = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_readonly_mmap,
	.splice_read	= generic_file_splice_read,
};

EXPORT_SYMBOL(generic_ro_fops);

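/*
 * Illustrative sketch (not part of this file): a simple read-only
 * filesystem can point regular-file inodes straight at generic_ro_fops
 * instead of defining its own file_operations.  example_read_inode and
 * example_aops are hypothetical names used only for the example.
 *
 *	static void example_read_inode(struct inode *inode)
 *	{
 *		if (S_ISREG(inode->i_mode)) {
 *			inode->i_fop = &generic_ro_fops;
 *			inode->i_data.a_ops = &example_aops;
 *		}
 *	}
 */
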
/**
 * generic_file_llseek_unlocked - lockless generic llseek implementation
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @origin:	type of seek
 *
 * Updates the file offset to the value specified by @offset and @origin.
 * Locking must be provided by the caller.
 */
loff_t
generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;

	switch (origin) {
	case SEEK_END:
		offset += inode->i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0)
			return file->f_pos;
		offset += file->f_pos;
		break;
	}

	if (offset < 0 || offset > inode->i_sb->s_maxbytes)
		return -EINVAL;

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}

	return offset;
}
EXPORT_SYMBOL(generic_file_llseek_unlocked);

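/*
 * Illustrative sketch (not part of this file): a filesystem that already
 * serializes seeks with its own lock can call the _unlocked variant
 * directly from its ->llseek method.  example_llseek and example_fs_lock
 * are hypothetical names standing in for the filesystem's own method and
 * lock.
 *
 *	static loff_t example_llseek(struct file *file, loff_t offset, int origin)
 *	{
 *		loff_t ret;
 *
 *		mutex_lock(&example_fs_lock);
 *		ret = generic_file_llseek_unlocked(file, offset, origin);
 *		mutex_unlock(&example_fs_lock);
 *		return ret;
 *	}
 */
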
/**
 * generic_file_llseek - generic llseek implementation for regular files
 * @file:	file structure to seek on
 * @offset:	file offset to seek to
 * @origin:	type of seek
 *
 * This is a generic implementation of ->llseek usable for all normal local
 * filesystems.  It just updates the file offset to the value specified by
 * @offset and @origin under i_mutex.
 */
loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t rval;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	rval = generic_file_llseek_unlocked(file, offset, origin);
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);

	return rval;
}
EXPORT_SYMBOL(generic_file_llseek);

loff_t no_llseek(struct file *file, loff_t offset, int origin)
{
	return -ESPIPE;
}
EXPORT_SYMBOL(no_llseek);

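/*
 * Illustrative sketch: drivers for inherently non-seekable devices wire
 * no_llseek into their file_operations so that lseek() on the device
 * fails with -ESPIPE instead of silently moving f_pos.  The fops below
 * and its read/write methods are hypothetical.
 *
 *	static const struct file_operations example_dev_fops = {
 *		.owner	= THIS_MODULE,
 *		.read	= example_dev_read,
 *		.write	= example_dev_write,
 *		.llseek	= no_llseek,
 *	};
 */
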
loff_t default_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;

	lock_kernel();
	switch (origin) {
		case SEEK_END:
			offset += i_size_read(file->f_path.dentry->d_inode);
			break;
		case SEEK_CUR:
			if (offset == 0) {
				retval = file->f_pos;
				goto out;
			}
			offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
out:
	unlock_kernel();
	return retval;
}
EXPORT_SYMBOL(default_llseek);

loff_t vfs_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t (*fn)(struct file *, loff_t, int);

	fn = no_llseek;
	if (file->f_mode & FMODE_LSEEK) {
		fn = default_llseek;
		if (file->f_op && file->f_op->llseek)
			fn = file->f_op->llseek;
	}
	return fn(file, offset, origin);
}
EXPORT_SYMBOL(vfs_llseek);

asmlinkage off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin)
{
	off_t retval;
	struct file * file;
	int fput_needed;

	retval = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto bad;

	retval = -EINVAL;
	if (origin <= SEEK_MAX) {
		loff_t res = vfs_llseek(file, offset, origin);
		retval = res;
		if (res != (loff_t)retval)
			retval = -EOVERFLOW;	/* LFS: should only happen on 32 bit platforms */
	}
	fput_light(file, fput_needed);
bad:
	return retval;
}

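/*
 * Userspace view (illustrative): the classic way to learn a file's size
 * through this system call is to seek to the end and read back the
 * returned offset, then restore the old position:
 *
 *	off_t old = lseek(fd, 0, SEEK_CUR);
 *	off_t size = lseek(fd, 0, SEEK_END);
 *	lseek(fd, old, SEEK_SET);
 *
 * With this 32-bit off_t entry point the -EOVERFLOW check above fires if
 * the result does not fit; large files need the llseek/lseek64 path.
 */
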
#ifdef __ARCH_WANT_SYS_LLSEEK
asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
			   unsigned long offset_low, loff_t __user * result,
			   unsigned int origin)
{
	int retval;
	struct file * file;
	loff_t offset;
	int fput_needed;

	retval = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto bad;

	retval = -EINVAL;
	if (origin > SEEK_MAX)
		goto out_putf;

	offset = vfs_llseek(file, ((loff_t) offset_high << 32) | offset_low,
			origin);

	retval = (int)offset;
	if (offset >= 0) {
		retval = -EFAULT;
		if (!copy_to_user(result, &offset, sizeof(offset)))
			retval = 0;
	}
out_putf:
	fput_light(file, fput_needed);
bad:
	return retval;
}
#endif

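/*
 * Note (illustrative sketch of the libc side): on 32-bit architectures the
 * C library implements lseek64() on top of this entry point by splitting
 * the 64-bit offset into two unsigned longs, roughly:
 *
 *	loff_t result;
 *	int err = syscall(__NR__llseek, fd,
 *			  (unsigned long)(offset >> 32),
 *			  (unsigned long)(offset & 0xffffffff),
 *			  &result, SEEK_SET);
 *
 * The kernel recombines them with ((loff_t)offset_high << 32) | offset_low
 * as shown above.  The exact wrapper lives in the C library; the call
 * above is only an illustration.
 */
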
/*
 * rw_verify_area doesn't like huge counts. We limit
 * them to something that fits in "int" so that others
 * won't have to do range checks all the time.
 */
#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK)

int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count)
{
	struct inode *inode;
	loff_t pos;
	int retval = -EINVAL;

	inode = file->f_path.dentry->d_inode;
	if (unlikely((ssize_t) count < 0))
		return retval;
	pos = *ppos;
	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0))
		return retval;

	if (unlikely(inode->i_flock && mandatory_lock(inode))) {
		retval = locks_mandatory_area(
			read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
			inode, file, pos, count);
		if (retval < 0)
			return retval;
	}
	retval = security_file_permission(file,
				read_write == READ ? MAY_READ : MAY_WRITE);
	if (retval)
		return retval;
	return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
}

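/*
 * Clamping example (illustrative): with 4K pages MAX_RW_COUNT is
 * INT_MAX & PAGE_CACHE_MASK == 0x7ffff000.  A count that is negative when
 * cast to ssize_t (e.g. (size_t)-1) fails with -EINVAL, while a large
 * positive count is accepted but clamped in the return value:
 *
 *	ret = rw_verify_area(READ, file, &pos, INT_MAX);
 *	// ret == 0x7ffff000, which vfs_read()/vfs_write() use as the count
 *
 * so a single read() or write() never transfers more than MAX_RW_COUNT
 * bytes (just under 2GB).
 */
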
static void wait_on_retry_sync_kiocb(struct kiocb *iocb)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (!kiocbIsKicked(iocb))
		schedule();
	else
		kiocbClearKicked(iocb);
	__set_current_state(TASK_RUNNING);
}

ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	for (;;) {
		ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_read);

ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(READ, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->read)
			ret = file->f_op->read(file, buf, count, pos);
		else
			ret = do_sync_read(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_access(file->f_path.dentry);
			add_rchar(current, ret);
		}
		inc_syscr(current);
	}

	return ret;
}

EXPORT_SYMBOL(vfs_read);

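/*
 * In-kernel usage sketch (illustrative): code that must read a file into a
 * kernel buffer typically wraps vfs_read() in a set_fs() pair so that the
 * __user pointer check accepts a kernel address.  This mirrors helpers
 * such as kernel_read() in fs/exec.c; the variables below are assumptions
 * for the sketch.
 *
 *	mm_segment_t old_fs = get_fs();
 *	loff_t pos = 0;
 *	ssize_t ret;
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)buf, len, &pos);
 *	set_fs(old_fs);
 */
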
ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;

	for (;;) {
		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

EXPORT_SYMBOL(do_sync_write);

ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	ssize_t ret;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	ret = rw_verify_area(WRITE, file, pos, count);
	if (ret >= 0) {
		count = ret;
		if (file->f_op->write)
			ret = file->f_op->write(file, buf, count, pos);
		else
			ret = do_sync_write(file, buf, count, pos);
		if (ret > 0) {
			fsnotify_modify(file->f_path.dentry);
			add_wchar(current, ret);
		}
		inc_syscw(current);
	}

	return ret;
}

EXPORT_SYMBOL(vfs_write);

static inline loff_t file_pos_read(struct file *file)
{
	return file->f_pos;
}

static inline void file_pos_write(struct file *file, loff_t pos)
{
	file->f_pos = pos;
}

asmlinkage ssize_t sys_read(unsigned int fd, char __user * buf, size_t count)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_read(file, buf, count, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

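/*
 * Userspace note (illustrative): read() may legitimately return less than
 * the amount requested, so robust callers loop until the buffer is full or
 * EOF/error is hit:
 *
 *	size_t done = 0;
 *	while (done < len) {
 *		ssize_t n = read(fd, buf + done, len - done);
 *		if (n < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;		// real error
 *		}
 *		if (n == 0)
 *			break;		// EOF
 *		done += n;
 *	}
 */
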
asmlinkage ssize_t sys_write(unsigned int fd, const char __user * buf, size_t count)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_write(file, buf, count, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
			     size_t count, loff_t pos)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PREAD)
			ret = vfs_read(file, buf, count, &pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

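/*
 * Userspace note (illustrative): pread() takes an explicit position and,
 * as the code above shows, works on a local copy of it, so the file's
 * f_pos is never updated.  That makes it safe for several threads to read
 * different parts of one open file without locking around lseek()+read():
 *
 *	ssize_t n = pread(fd, buf, sizeof(buf), (off_t)4096 * chunk_index);
 *
 * chunk_index here is just an illustrative variable.
 */
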
asmlinkage ssize_t sys_pwrite64(unsigned int fd, const char __user *buf,
			      size_t count, loff_t pos)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	if (pos < 0)
		return -EINVAL;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = -ESPIPE;
		if (file->f_mode & FMODE_PWRITE)
			ret = vfs_write(file, buf, count, &pos);
		fput_light(file, fput_needed);
	}

	return ret;
}

/*
 * Reduce an iovec's length in-place.  Return the resulting number of segments
 */
unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to)
{
	unsigned long seg = 0;
	size_t len = 0;

	while (seg < nr_segs) {
		seg++;
		if (len + iov->iov_len >= to) {
			iov->iov_len = to - len;
			break;
		}
		len += iov->iov_len;
		iov++;
	}
	return seg;
}
EXPORT_SYMBOL(iov_shorten);

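/*
 * Worked example (illustrative): shortening three 100-byte segments to a
 * total of 250 bytes leaves the first two untouched, truncates the third
 * to 50 bytes, and returns 3.  a, b and c stand for any buffers.
 *
 *	struct iovec v[3] = {
 *		{ .iov_base = a, .iov_len = 100 },
 *		{ .iov_base = b, .iov_len = 100 },
 *		{ .iov_base = c, .iov_len = 100 },
 *	};
 *	nr = iov_shorten(v, 3, 250);	// nr == 3, v[2].iov_len == 50
 */
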
ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov,
		unsigned long nr_segs, size_t len, loff_t *ppos, iov_fn_t fn)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	kiocb.ki_left = len;
	kiocb.ki_nbytes = len;

	for (;;) {
		ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos);
		if (ret != -EIOCBRETRY)
			break;
		wait_on_retry_sync_kiocb(&kiocb);
	}

	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}

/* Do it by hand, with file-ops */
ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos, io_fn_t fn)
{
	struct iovec *vector = iov;
	ssize_t ret = 0;

	while (nr_segs > 0) {
		void __user *base;
		size_t len;
		ssize_t nr;

		base = vector->iov_base;
		len = vector->iov_len;
		vector++;
		nr_segs--;

		nr = fn(filp, base, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != len)
			break;
	}

	return ret;
}

/* A write operation does a read from user space and vice versa */
#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ)

ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
			      unsigned long nr_segs, unsigned long fast_segs,
			      struct iovec *fast_pointer,
			      struct iovec **ret_pointer)
{
	unsigned long seg;
	ssize_t ret;
	struct iovec *iov = fast_pointer;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument
	 * was less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0) {
		ret = 0;
		goto out;
	}

	/*
	 * First get the "struct iovec" from user memory and
	 * verify all the pointers
	 */
	if (nr_segs > UIO_MAXIOV) {
		ret = -EINVAL;
		goto out;
	}
	if (nr_segs > fast_segs) {
		iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL);
		if (iov == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	}
	if (copy_from_user(iov, uvector, nr_segs*sizeof(*uvector))) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL
	 * if an element length is < 0 when cast to ssize_t or if the
	 * total length would overflow the ssize_t return value of the
	 * system call.
	 */
	ret = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		void __user *buf = iov[seg].iov_base;
		ssize_t len = (ssize_t)iov[seg].iov_len;

		/* see if we're about to use an invalid len or if
		 * it's about to overflow ssize_t */
		if (len < 0 || (ret + len < ret)) {
			ret = -EINVAL;
			goto out;
		}
		if (unlikely(!access_ok(vrfy_dir(type), buf, len))) {
			ret = -EFAULT;
			goto out;
		}

		ret += len;
	}
out:
	*ret_pointer = iov;
	return ret;
}

static ssize_t do_readv_writev(int type, struct file *file,
			       const struct iovec __user * uvector,
			       unsigned long nr_segs, loff_t *pos)
{
	size_t tot_len;
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	ssize_t ret;
	io_fn_t fn;
	iov_fn_t fnv;

	if (!file->f_op) {
		ret = -EINVAL;
		goto out;
	}

	ret = rw_copy_check_uvector(type, uvector, nr_segs,
			ARRAY_SIZE(iovstack), iovstack, &iov);
	if (ret <= 0)
		goto out;

	tot_len = ret;
	ret = rw_verify_area(type, file, pos, tot_len);
	if (ret < 0)
		goto out;

	fnv = NULL;
	if (type == READ) {
		fn = file->f_op->read;
		fnv = file->f_op->aio_read;
	} else {
		fn = (io_fn_t)file->f_op->write;
		fnv = file->f_op->aio_write;
	}

	if (fnv)
		ret = do_sync_readv_writev(file, iov, nr_segs, tot_len,
						pos, fnv);
	else
		ret = do_loop_readv_writev(file, iov, nr_segs, pos, fn);

out:
	if (iov != iovstack)
		kfree(iov);
	/* the expression below is "ret >= 0" for READ and "ret > 0" for WRITE */
	if ((ret + (type == READ)) > 0) {
		if (type == READ)
			fsnotify_access(file->f_path.dentry);
		else
			fsnotify_modify(file->f_path.dentry);
	}
	return ret;
}

ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
		  unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_READ))
		return -EBADF;
	if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
		return -EINVAL;

	return do_readv_writev(READ, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_readv);

ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
		   unsigned long vlen, loff_t *pos)
{
	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;
	if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
		return -EINVAL;

	return do_readv_writev(WRITE, file, vec, vlen, pos);
}

EXPORT_SYMBOL(vfs_writev);

asmlinkage ssize_t
sys_readv(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_readv(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_rchar(current, ret);
	inc_syscr(current);
	return ret;
}

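/*
 * Userspace example (illustrative): readv() fills the supplied buffers in
 * order, which lets a caller split, say, a fixed header and a payload into
 * separate buffers in a single system call.  "struct header" is a
 * placeholder for an application-defined type.
 *
 *	struct header hdr;
 *	char payload[4096];
 *	struct iovec iov[2] = {
 *		{ .iov_base = &hdr,    .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = sizeof(payload) },
 *	};
 *	ssize_t n = readv(fd, iov, 2);
 */
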
asmlinkage ssize_t
sys_writev(unsigned long fd, const struct iovec __user *vec, unsigned long vlen)
{
	struct file *file;
	ssize_t ret = -EBADF;
	int fput_needed;

	file = fget_light(fd, &fput_needed);
	if (file) {
		loff_t pos = file_pos_read(file);
		ret = vfs_writev(file, vec, vlen, &pos);
		file_pos_write(file, pos);
		fput_light(file, fput_needed);
	}

	if (ret > 0)
		add_wchar(current, ret);
	inc_syscw(current);
	return ret;
}

static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
			   size_t count, loff_t max)
{
	struct file * in_file, * out_file;
	struct inode * in_inode, * out_inode;
	loff_t pos;
	ssize_t retval;
	int fput_needed_in, fput_needed_out, fl;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in_file = fget_light(in_fd, &fput_needed_in);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -EINVAL;
	in_inode = in_file->f_path.dentry->d_inode;
	if (!in_inode)
		goto fput_in;
	if (!in_file->f_op || !in_file->f_op->splice_read)
		goto fput_in;
	retval = -ESPIPE;
	if (!ppos)
		ppos = &in_file->f_pos;
	else
		if (!(in_file->f_mode & FMODE_PREAD))
			goto fput_in;
	retval = rw_verify_area(READ, in_file, ppos, count);
	if (retval < 0)
		goto fput_in;
	count = retval;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out_file = fget_light(out_fd, &fput_needed_out);
	if (!out_file)
		goto fput_in;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	if (!out_file->f_op || !out_file->f_op->sendpage)
		goto fput_out;
	out_inode = out_file->f_path.dentry->d_inode;
	retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count);
	if (retval < 0)
		goto fput_out;
	count = retval;

	if (!max)
		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);

	pos = *ppos;
	retval = -EINVAL;
	if (unlikely(pos < 0))
		goto fput_out;
	if (unlikely(pos + count > max)) {
		retval = -EOVERFLOW;
		if (pos >= max)
			goto fput_out;
		count = max - pos;
	}

	fl = 0;
#if 0
	/*
	 * We need to debate whether we can enable this or not. The
	 * man page documents EAGAIN return for the output at least,
	 * and the application is arguably buggy if it doesn't expect
	 * EAGAIN on a non-blocking file descriptor.
	 */
	if (in_file->f_flags & O_NONBLOCK)
		fl = SPLICE_F_NONBLOCK;
#endif
	retval = do_splice_direct(in_file, ppos, out_file, count, fl);

	if (retval > 0) {
		add_rchar(current, retval);
		add_wchar(current, retval);
	}

	inc_syscr(current);
	inc_syscw(current);
	if (*ppos > max)
		retval = -EOVERFLOW;

fput_out:
	fput_light(out_file, fput_needed_out);
fput_in:
	fput_light(in_file, fput_needed_in);
out:
	return retval;
}

asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t __user *offset, size_t count)
{
	loff_t pos;
	off_t off;
	ssize_t ret;

	if (offset) {
		if (unlikely(get_user(off, offset)))
			return -EFAULT;
		pos = off;
		ret = do_sendfile(out_fd, in_fd, &pos, count, MAX_NON_LFS);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}

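/*
 * Userspace example (illustrative): the classic use of sendfile() is
 * copying a file to a connected socket without bouncing the data through a
 * user buffer.  Passing an explicit offset leaves the source file's f_pos
 * untouched, matching the do_sendfile() behaviour above.  st is assumed to
 * hold the result of fstat(file_fd, &st).
 *
 *	off_t off = 0;
 *	while (off < st.st_size) {
 *		ssize_t n = sendfile(sock_fd, file_fd, &off, st.st_size - off);
 *		if (n <= 0)
 *			break;
 *	}
 */
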
asmlinkage ssize_t sys_sendfile64(int out_fd, int in_fd, loff_t __user *offset, size_t count)
{
	loff_t pos;
	ssize_t ret;

	if (offset) {
		if (unlikely(copy_from_user(&pos, offset, sizeof(loff_t))))
			return -EFAULT;
		ret = do_sendfile(out_fd, in_fd, &pos, count, 0);
		if (unlikely(put_user(pos, offset)))
			return -EFAULT;
		return ret;
	}

	return do_sendfile(out_fd, in_fd, NULL, count, 0);
}